author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-14 13:52:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-14 13:52:09 -0500
commit		37dc79565c4b7e735f190eaa6ed5bb6eb3d3968a (patch)
tree		4f20cc3c9240c5759f72bf560b596a809173ee29
parent		894025f24bd028942da3e602b87d9f7223109b14 (diff)
parent		1d9ddde12e3c9bab7f3d3484eb9446315e3571ca (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.15:
API:
- Disambiguate EBUSY when queueing crypto request by adding ENOSPC.
This change touches code outside the crypto API.
- Reset settings when empty string is written to rng_current.
Algorithms:
- Add OSCCA SM3 secure hash.
Drivers:
- Remove old mv_cesa driver (replaced by marvell/cesa).
- Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
- Add ccm/gcm AES in crypto4xx.
- Add support for BCM7278 in iproc-rng200.
- Add hash support on Exynos in s5p-sss.
- Fix fallback-induced error in vmx.
- Fix output IV in atmel-aes.
- Fix empty GCM hash in mediatek.
Others:
- Fix DoS potential in lib/mpi.
- Fix potential out-of-order issues with padata"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
lib/mpi: call cond_resched() from mpi_powm() loop
crypto: stm32/hash - Fix return issue on update
crypto: dh - Remove pointless checks for NULL 'p' and 'g'
crypto: qat - Clean up error handling in qat_dh_set_secret()
crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
crypto: dh - Don't permit 'p' to be 0
crypto: dh - Fix double free of ctx->p
hwrng: iproc-rng200 - Add support for BCM7278
dt-bindings: rng: Document BCM7278 RNG200 compatible
crypto: chcr - Replace _manual_ swap with swap macro
crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
crypto: atmel - remove empty functions
crypto: ecdh - remove empty exit()
MAINTAINERS: update maintainer for qat
crypto: caam - remove unused param of ctx_map_to_sec4_sg()
crypto: caam - remove unneeded edesc zeroization
crypto: atmel-aes - Reset the controller before each use
crypto: atmel-aes - properly set IV after {en,de}crypt
hwrng: core - Reset user selected rng by writing "" to rng_current
...
135 files changed, 5701 insertions, 4648 deletions
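
The recurring theme in the diffs below is the new generic wait helper pair, crypto_req_done() and crypto_wait_req(), which replaces per-caller completion boilerplate throughout the tree. As a minimal, self-contained sketch of the new synchronous calling pattern (names are the ones introduced by this series; tfm allocation and most error handling trimmed):

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    /* Sketch: synchronously encrypt one 16-byte block over the async API. */
    static int encrypt_one_block(struct crypto_skcipher *tfm,
                                 struct scatterlist *sg, u8 *iv)
    {
        struct crypto_wait wait;
        struct skcipher_request *req;
        int err;

        crypto_init_wait(&wait);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
            return -ENOMEM;

        /* crypto_req_done() records the status and fires the completion. */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, sg, sg, 16, iv);

        /* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
         * final status delivered through the callback. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        return err;
    }

Compare this with the tcrypt_result/test_skcipher_cb boilerplate removed from api-samples.rst just below.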
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 2531948db89f..006827e30d06 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation
 ::
 
 
-    struct tcrypt_result {
-        struct completion completion;
-        int err;
-    };
-
     /* tie all data structures together */
     struct skcipher_def {
         struct scatterlist sg;
         struct crypto_skcipher *tfm;
         struct skcipher_request *req;
-        struct tcrypt_result result;
+        struct crypto_wait wait;
     };
 
-    /* Callback function */
-    static void test_skcipher_cb(struct crypto_async_request *req, int error)
-    {
-        struct tcrypt_result *result = req->data;
-
-        if (error == -EINPROGRESS)
-            return;
-        result->err = error;
-        complete(&result->completion);
-        pr_info("Encryption finished successfully\n");
-    }
-
     /* Perform cipher operation */
     static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
                          int enc)
     {
-        int rc = 0;
+        int rc;
 
         if (enc)
-            rc = crypto_skcipher_encrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
         else
-            rc = crypto_skcipher_decrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
 
-        switch (rc) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            rc = wait_for_completion_interruptible(
-                &sk->result.completion);
-            if (!rc && !sk->result.err) {
-                reinit_completion(&sk->result.completion);
-                break;
-            }
-        default:
-            pr_info("skcipher encrypt returned with %d result %d\n",
-                rc, sk->result.err);
-            break;
-        }
-        init_completion(&sk->result.completion);
+        if (rc)
+            pr_info("skcipher encrypt returned with result %d\n", rc);
 
         return rc;
     }
@@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation
     }
 
     skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                      test_skcipher_cb,
-                      &sk.result);
+                      crypto_req_done,
+                      &sk.wait);
 
     /* AES 256 with random key */
     get_random_bytes(&key, 32);
@@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation
     /* We encrypt one block */
     sg_init_one(&sk.sg, scratchpad, 16);
     skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
-    init_completion(&sk.result.completion);
+    crypto_init_wait(&sk.wait);
 
     /* encrypt data */
     ret = test_skcipher_encdec(&sk, 1);
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..4ca8dd4d7e66 100644
--- a/Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
index e25a456664b9..0014da9145af 100644
--- a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
@@ -1,7 +1,9 @@
 HWRNG support for the iproc-rng200 driver
 
 Required properties:
-- compatible : "brcm,iproc-rng200"
+- compatible : Must be one of:
+    "brcm,bcm7278-rng200"
+    "brcm,iproc-rng200"
 - reg : base address and size of control register block
 
 Example:
diff --git a/MAINTAINERS b/MAINTAINERS
index 650aa0e0e9d8..9a9343a24528 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5484,7 +5484,7 @@ F:	include/uapi/linux/fb.h
 
 FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
 M:	Horia Geantă <horia.geanta@nxp.com>
-M:	Dan Douglass <dan.douglass@nxp.com>
+M:	Aymen Sghaier <aymen.sghaier@nxp.com>
 L:	linux-crypto@vger.kernel.org
 S:	Maintained
 F:	drivers/crypto/caam/
@@ -11060,7 +11060,6 @@ F:	drivers/mtd/nand/pxa3xx_nand.c
 
 QAT DRIVER
 M:	Giovanni Cabiddu <giovanni.cabiddu@intel.com>
-M:	Salvatore Benedetto <salvatore.benedetto@intel.com>
 L:	qat-linux@intel.com
 S:	Supported
 F:	drivers/crypto/qat/
@@ -11793,7 +11792,7 @@ L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
 F:	drivers/crypto/exynos-rng.c
-F:	Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F:	Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
 
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jingoohan1@gmail.com>
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index a93cc2fcf791..2f01e84b3d8c 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -140,6 +140,6 @@ CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 69a4bd13eea5..7c41bee28463 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -279,6 +279,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index e39ee282e6ca..b831baddae02 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -163,5 +163,5 @@ CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5c15d6b57329..3bf3dcf29825 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -28,6 +28,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
+#include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
@@ -1067,9 +1068,10 @@ static struct skcipher_alg aesni_skciphers[] = {
 	}
 };
 
+static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 
-struct {
+static struct {
 	const char *algname;
 	const char *drvname;
 	const char *basename;
@@ -1131,7 +1133,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize = common_rfc4106_set_authsize,
 	.encrypt = helper_rfc4106_encrypt,
 	.decrypt = helper_rfc4106_decrypt,
-	.ivsize = 8,
+	.ivsize = GCM_RFC4106_IV_SIZE,
 	.maxauthsize = 16,
 	.base = {
 		.cra_name = "__gcm-aes-aesni",
@@ -1149,7 +1151,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize = rfc4106_set_authsize,
 	.encrypt = rfc4106_encrypt,
 	.decrypt = rfc4106_decrypt,
-	.ivsize = 8,
+	.ivsize = GCM_RFC4106_IV_SIZE,
 	.maxauthsize = 16,
 	.base = {
 		.cra_name = "rfc4106(gcm(aes))",
@@ -1165,7 +1167,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize = generic_gcmaes_set_authsize,
 	.encrypt = generic_gcmaes_encrypt,
 	.decrypt = generic_gcmaes_decrypt,
-	.ivsize = 12,
+	.ivsize = GCM_AES_IV_SIZE,
 	.maxauthsize = 16,
 	.base = {
 		.cra_name = "gcm(aes)",
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index f247304299a2..1c099dc08cc3 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -41,6 +41,7 @@
 #include <asm/inst.h>
 
 
+.section .rodata
 .align 16
 /*
  * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
@@ -111,19 +112,13 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
 	pxor    CONSTANT, %xmm1
 	sub     $0x40, LEN
 	add     $0x40, BUF
-#ifndef __x86_64__
-	/* This is for position independent code(-fPIC) support for 32bit */
-	call    delta
-delta:
-	pop     %ecx
-#endif
 	cmp     $0x40, LEN
 	jb      less_64
 
 #ifdef __x86_64__
 	movdqa .Lconstant_R2R1(%rip), CONSTANT
 #else
-	movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+	movdqa .Lconstant_R2R1, CONSTANT
 #endif
 
 loop_64:/* 64 bytes Full cache line folding */
@@ -172,7 +167,7 @@ less_64:/* Folding cache line into 128bit */
 #ifdef __x86_64__
 	movdqa .Lconstant_R4R3(%rip), CONSTANT
 #else
-	movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT
+	movdqa .Lconstant_R4R3, CONSTANT
 #endif
 	prefetchnta (BUF)
 
@@ -220,8 +215,8 @@ fold_64:
 	movdqa .Lconstant_R5(%rip), CONSTANT
 	movdqa .Lconstant_mask32(%rip), %xmm3
 #else
-	movdqa .Lconstant_R5 - delta(%ecx), CONSTANT
-	movdqa .Lconstant_mask32 - delta(%ecx), %xmm3
+	movdqa .Lconstant_R5, CONSTANT
+	movdqa .Lconstant_mask32, %xmm3
 #endif
 	psrldq $0x04, %xmm2
 	pand   %xmm3, %xmm1
@@ -232,7 +227,7 @@ fold_64:
 #ifdef __x86_64__
 	movdqa .Lconstant_RUpoly(%rip), CONSTANT
 #else
-	movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT
+	movdqa .Lconstant_RUpoly, CONSTANT
 #endif
 	movdqa %xmm1, %xmm2
 	pand   %xmm3, %xmm1
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ac5fb37e6f4b..f7911963bb79 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -860,6 +860,17 @@ config CRYPTO_SHA3
 	  References:
 	  http://keccak.noekeon.org/
 
+config CRYPTO_SM3
+	tristate "SM3 digest algorithm"
+	select CRYPTO_HASH
+	help
+	  SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
+	  It is part of the Chinese Commercial Cryptography suite.
+
+	  References:
+	  http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+	  https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
 	select CRYPTO_HASH
diff --git a/crypto/Makefile b/crypto/Makefile
index da190be60ce2..d674884b2d51 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
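
With the Kconfig and Makefile entries above, the OSCCA SM3 hash becomes reachable through the ordinary shash interface. A hedged sketch of a one-shot 256-bit SM3 digest (assumes CONFIG_CRYPTO_SM3 is enabled and that sm3_generic registers the algorithm under the name "sm3"; the helper itself is illustrative, not from the patch):

    #include <crypto/hash.h>

    /* Sketch: one-shot SM3 digest of a buffer; out must hold 32 bytes. */
    static int sm3_digest(const u8 *data, unsigned int len, u8 *out)
    {
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sm3", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            desc->flags = 0;    /* shash_desc still had a flags field here */
            err = crypto_shash_digest(desc, data, len, out);
            shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return err;
    }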
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 337cf382718e..85cea9de324a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 }
 EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
 
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
-	switch (err) {
-	case -EINPROGRESS:
-	case -EBUSY:
-		wait_for_completion(&completion->completion);
-		reinit_completion(&completion->completion);
-		err = completion->err;
-		break;
-	};
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
-	struct af_alg_completion *completion = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	completion->err = err;
-	complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 5e8666e6ccae..3a35d67de7d9 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 
 	err = op(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 	req->base.complete = ahash_def_finup_done2;
 
 	err = crypto_ahash_reqtfm(req)->final(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 
 	err = tfm->update(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	return ahash_def_finup_finish1(req, err);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index aa699ff6c876..60d7366ed343 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
 	int err = -EINPROGRESS;
 
 	if (unlikely(queue->qlen >= queue->max_qlen)) {
-		err = -EBUSY;
-		if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -ENOSPC;
 			goto out;
+		}
+		err = -EBUSY;
 		if (queue->backlog == &queue->list)
 			queue->backlog = &request->list;
 	}
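
After this hunk a full queue no longer yields -EBUSY in both cases: -EBUSY now always means "queue full but request backlogged", while -ENOSPC means "queue full, request not accepted". A hedged sketch of what a dispatch site can do with the split (illustrative helper, not from the patch):

    #include <crypto/algapi.h>

    /* Sketch: classify crypto_enqueue_request() results after the split. */
    static int enqueue_classified(struct crypto_queue *queue,
                                  struct crypto_async_request *req)
    {
        int err = crypto_enqueue_request(queue, req);

        if (err == -EINPROGRESS)
            return 0;    /* queued; completion callback will fire */
        if (err == -EBUSY)
            return 0;    /* full, but backlogged (MAY_BACKLOG was set) */
        return err;      /* -ENOSPC: full and dropped; caller must retry */
    }

The cryptd.c hunk further down shows the real consumer-side simplification: the old "err == -EBUSY && !may_backlog" test collapses to "err == -ENOSPC".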
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 960d8548171b..5e6df2a087fa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 		int notnum = 0;
 
 		name = ++p;
-		len = 0;
 
 		for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
 			notnum |= !isdigit(*p);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 516b38c3a169..aacae0837aff 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* Synchronous operation */
 		aead_request_set_callback(&areq->cra_u.aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
-					  af_alg_complete, &ctx->completion);
-		err = af_alg_wait_for_completion(ctx->enc ?
+					  crypto_req_done, &ctx->wait);
+		err = crypto_wait_req(ctx->enc ?
 				crypto_aead_encrypt(&areq->cra_u.aead_req) :
 				crypto_aead_decrypt(&areq->cra_u.aead_req),
-				&ctx->completion);
+				&ctx->wait);
 	}
 
 	/* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->merge = 0;
 	ctx->enc = 0;
 	ctx->aead_assoclen = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5e92bd275ef3..76d2e716c792 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -26,7 +26,7 @@ struct hash_ctx {
 
 	u8 *result;
 
-	struct af_alg_completion completion;
+	struct crypto_wait wait;
 
 	unsigned int len;
 	bool more;
@@ -88,8 +88,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 		if ((msg->msg_flags & MSG_MORE))
 			hash_free_result(sk, ctx);
 
-		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
 		if (err)
 			goto unlock;
 	}
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 
 		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-		err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+				      &ctx->wait);
 		af_alg_free_sg(&ctx->sgl);
 		if (err)
 			goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			goto unlock;
 
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+				      &ctx->wait);
 	}
 
 unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	} else {
 		if (!ctx->more) {
 			err = crypto_ahash_init(&ctx->req);
-			err = af_alg_wait_for_completion(err, &ctx->completion);
+			err = crypto_wait_req(err, &ctx->wait);
 			if (err)
 				goto unlock;
 		}
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 		err = crypto_ahash_update(&ctx->req);
 	}
 
-	err = af_alg_wait_for_completion(err, &ctx->completion);
+	err = crypto_wait_req(err, &ctx->wait);
 	if (err)
 		goto unlock;
 
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
 	if (!result && !ctx->more) {
-		err = af_alg_wait_for_completion(
-				crypto_ahash_init(&ctx->req),
-				&ctx->completion);
+		err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+				      &ctx->wait);
 		if (err)
 			goto unlock;
 	}
 
 	if (!result || ctx->more) {
 		ctx->more = 0;
-		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+				      &ctx->wait);
 		if (err)
 			goto unlock;
 	}
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 
 	ahash_request_set_tfm(&ctx->req, hash);
 	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				   af_alg_complete, &ctx->completion);
+				   crypto_req_done, &ctx->wait);
 
 	sk->sk_destruct = hash_sock_destruct;
 
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 8ae4170aaeb4..9954b078f0b9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP |
 					      CRYPTO_TFM_REQ_MAY_BACKLOG,
-					      af_alg_complete,
-					      &ctx->completion);
-		err = af_alg_wait_for_completion(ctx->enc ?
+					      crypto_req_done, &ctx->wait);
+		err = crypto_wait_req(ctx->enc ?
 			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
-			&ctx->completion);
+			&ctx->wait);
 	}
 
 	/* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 
diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c6c7ec..2a2479d168aa 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"
 
 LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
 
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+	struct crypto_wait *wait = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	wait->err = err;
+	complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
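
crypto_req_done() is only half of the helper pair; the waiting side lives in include/linux/crypto.h, which this page does not show. Reconstructed from the call sites in this series (treat it as a sketch rather than the authoritative header):

    /* Presumed shape of the new wait helpers in <linux/crypto.h>. */
    struct crypto_wait {
        struct completion completion;
        int err;
    };

    static inline void crypto_init_wait(struct crypto_wait *wait)
    {
        init_completion(&wait->completion);
    }

    /* Turn an async return code into a synchronous result: sleep until
     * the crypto_req_done() callback reports the final status. */
    static inline int crypto_wait_req(int err, struct crypto_wait *wait)
    {
        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&wait->completion);
            reinit_completion(&wait->completion);
            err = wait->err;
            break;
        }

        return err;
    }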
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 3cd6e12cfc46..d916235d6cf5 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
 	public_key_signature_free(payload3);
 }
 
-struct public_key_completion {
-	struct completion completion;
-	int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
-	struct public_key_completion *compl = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	compl->err = err;
-	complete(&compl->completion);
-}
-
 /*
  * Verify a signature using a public key.
  */
 int public_key_verify_signature(const struct public_key *pkey,
 				const struct public_key_signature *sig)
 {
-	struct public_key_completion compl;
+	struct crypto_wait cwait;
 	struct crypto_akcipher *tfm;
 	struct akcipher_request *req;
 	struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
 	sg_init_one(&digest_sg, output, outlen);
 	akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
 				   outlen);
-	init_completion(&compl.completion);
+	crypto_init_wait(&cwait);
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 				      CRYPTO_TFM_REQ_MAY_SLEEP,
-				      public_key_verify_done, &compl);
+				      crypto_req_done, &cwait);
 
 	/* Perform the verification calculation. This doesn't actually do the
 	 * verification, but rather calculates the hash expected by the
 	 * signature and returns that to us.
 	 */
-	ret = crypto_akcipher_verify(req);
-	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-		wait_for_completion(&compl.completion);
-		ret = compl.err;
-	}
+	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
 	if (ret < 0)
 		goto out_free_output;
 
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48a45c4..bd43cf5be14c 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
 	atomic_t *refcnt;
-	bool may_backlog;
 
 	cpu = get_cpu();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
-	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 
-	if (err == -EBUSY && !may_backlog)
+	if (err == -ENOSPC)
 		goto out_put_cpu;
 
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
diff --git a/crypto/cts.c b/crypto/cts.c
index 243f591dc409..4773c188e6d9 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 
 	err = cts_cbc_encrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 
 	err = cts_cbc_decrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 out:
diff --git a/crypto/dh.c b/crypto/dh.c
index b1032a5c1bfa..5659fe7f446d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -21,19 +21,12 @@ struct dh_ctx {
 	MPI xa;
 };
 
-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
 {
 	mpi_free(ctx->p);
 	mpi_free(ctx->g);
-	ctx->p = NULL;
-	ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
-	dh_clear_params(ctx);
 	mpi_free(ctx->xa);
-	ctx->xa = NULL;
+	memset(ctx, 0, sizeof(*ctx));
 }
 
 /*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
 
 static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 {
-	if (unlikely(!params->p || !params->g))
-		return -EINVAL;
-
 	if (dh_check_params_length(params->p_size << 3))
 		return -EINVAL;
 
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 		return -EINVAL;
 
 	ctx->g = mpi_read_raw_data(params->g, params->g_size);
-	if (!ctx->g) {
-		mpi_free(ctx->p);
+	if (!ctx->g)
 		return -EINVAL;
-	}
 
 	return 0;
 }
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	struct dh params;
 
 	/* Free the old MPI key if any */
-	dh_free_ctx(ctx);
+	dh_clear_ctx(ctx);
 
 	if (crypto_dh_decode_key(buf, len, &params) < 0)
-		return -EINVAL;
+		goto err_clear_ctx;
 
 	if (dh_set_params(ctx, &params) < 0)
-		return -EINVAL;
+		goto err_clear_ctx;
 
 	ctx->xa = mpi_read_raw_data(params.key, params.key_size);
-	if (!ctx->xa) {
-		dh_clear_params(ctx);
-		return -EINVAL;
-	}
+	if (!ctx->xa)
+		goto err_clear_ctx;
 
 	return 0;
+
+err_clear_ctx:
+	dh_clear_ctx(ctx);
+	return -EINVAL;
 }
 
 static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
 {
 	struct dh_ctx *ctx = dh_get_ctx(tfm);
 
-	dh_free_ctx(ctx);
+	dh_clear_ctx(ctx);
 }
 
 static struct kpp_alg dh = {
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 8ba8a3f82620..24fdb2ecaa85 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
 	return src + size;
 }
 
-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
 {
 	return p->key_size + p->p_size + p->g_size;
 }
 
-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
 {
 	return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
 }
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	if (secret.len != crypto_dh_key_len(params))
 		return -EINVAL;
 
+	/*
+	 * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+	 * some drivers assume otherwise.
+	 */
+	if (params->key_size > params->p_size ||
+	    params->g_size > params->p_size)
+		return -EINVAL;
+
 	/* Don't allocate memory. Set pointers to data within
 	 * the given buffer
 	 */
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	params->p = (void *)(ptr + params->key_size);
 	params->g = (void *)(ptr + params->key_size + params->p_size);
 
+	/*
+	 * Don't permit 'p' to be 0. It's not a prime number, and it's subject
+	 * to corner cases such as 'mod 0' being undefined or
+	 * crypto_kpp_maxsize() returning 0.
+	 */
+	if (memchr_inv(params->p, 0, params->p_size) == NULL)
+		return -EINVAL;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
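
Taken together, the two new checks mean a packed DH secret is rejected before any MPI allocation when its geometry is nonsensical. A hedged restatement of the rules as a standalone predicate (illustrative only; the real checks live inline in crypto_dh_decode_key() as shown above):

    #include <crypto/dh.h>
    #include <linux/string.h>

    /* Sketch: the parameter-sanity rules enforced by this patch. */
    static bool dh_params_sane(const struct dh *params)
    {
        /* 'key' and 'g' may not be longer than 'p'... */
        if (params->key_size > params->p_size ||
            params->g_size > params->p_size)
            return false;
        /* ...and 'p' may not be zero (memchr_inv() == NULL means all-zero). */
        return memchr_inv(params->p, 0, params->p_size) != NULL;
    }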
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 70018397e59a..4faa2781c964 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
 	return 0;
 }
 
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
-	struct drbg_state *drbg = req->data;
-
-	if (error == -EINPROGRESS)
-		return;
-	drbg->ctr_async_err = error;
-	complete(&drbg->ctr_completion);
-}
-
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
 	struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 		return PTR_ERR(sk_tfm);
 	}
 	drbg->ctr_handle = sk_tfm;
-	init_completion(&drbg->ctr_completion);
+	crypto_init_wait(&drbg->ctr_wait);
 
 	req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
 	if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 		return -ENOMEM;
 	}
 	drbg->ctr_req = req;
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					drbg_skcipher_cb, drbg);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+					CRYPTO_TFM_REQ_MAY_SLEEP,
+					crypto_req_done, &drbg->ctr_wait);
 
 	alignmask = crypto_skcipher_alignmask(sk_tfm);
 	drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 	/* Output buffer may not be valid for SGL, use scratchpad */
 	skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
 				   cryptlen, drbg->V);
-	ret = crypto_skcipher_encrypt(drbg->ctr_req);
-	switch (ret) {
-	case 0:
-		break;
-	case -EINPROGRESS:
-	case -EBUSY:
-		wait_for_completion(&drbg->ctr_completion);
-		if (!drbg->ctr_async_err) {
-			reinit_completion(&drbg->ctr_completion);
-			break;
-		}
-	default:
+	ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+			      &drbg->ctr_wait);
+	if (ret)
 		goto out;
-	}
-	init_completion(&drbg->ctr_completion);
+
+	crypto_init_wait(&drbg->ctr_wait);
 
 	memcpy(outbuf, drbg->outscratchpad, cryptlen);
 
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 4271fc77d261..3aca0933ec44 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -131,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
 	return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
 }
 
-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
-	return;
-}
-
 static struct kpp_alg ecdh = {
 	.set_secret = ecdh_set_secret,
 	.generate_public_key = ecdh_compute_value,
 	.compute_shared_secret = ecdh_compute_value,
 	.max_size = ecdh_max_size,
-	.exit = no_exit_tfm,
 	.base = {
 		.cra_name = "ecdh",
 		.cra_driver_name = "ecdh-generic",
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index f05bea5fd257..d3af8e8b0b5e 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
 	return src + sz;
 }
 
-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
 {
 	return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
 }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 3841b5eafa7e..8589681fb9f6 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -14,9 +14,9 @@
 #include <crypto/internal/hash.h>
 #include <crypto/null.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
 #include <crypto/hash.h>
 #include "internal.h"
-#include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
 	} u;
 };
 
-struct crypto_gcm_setkey_result {
-	int err;
-	struct completion completion;
-};
-
 static struct {
 	u8 buf[16];
 	struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
 	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
 }
 
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-	struct crypto_gcm_setkey_result *result = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	result->err = err;
-	complete(&result->completion);
-}
-
 static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 			     unsigned int keylen)
 {
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		be128 hash;
 		u8 iv[16];
 
-		struct crypto_gcm_setkey_result result;
+		struct crypto_wait wait;
 
 		struct scatterlist sg[1];
 		struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	if (!data)
 		return -ENOMEM;
 
-	init_completion(&data->result.completion);
+	crypto_init_wait(&data->wait);
 	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
 	skcipher_request_set_tfm(&data->req, ctr);
 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
 				      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      crypto_gcm_setkey_done,
-				      &data->result);
+				      crypto_req_done,
+				      &data->wait);
 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
 				   sizeof(data->hash), data->iv);
 
-	err = crypto_skcipher_encrypt(&data->req);
-	if (err == -EINPROGRESS || err == -EBUSY) {
-		wait_for_completion(&data->result.completion);
-		err = data->result.err;
-	}
+	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+			      &data->wait);
 
 	if (err)
 		goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
 	struct scatterlist *sg;
 
 	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
-	memcpy(pctx->iv, req->iv, 12);
-	memcpy(pctx->iv + 12, &counter, 4);
+	memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+	memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
 
 	sg_init_table(pctx->src, 3);
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 	inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
 				       ctr->base.cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
-	inst->alg.ivsize = 12;
+	inst->alg.ivsize = GCM_AES_IV_SIZE;
 	inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
 	inst->alg.maxauthsize = 16;
 	inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 
-	scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+	scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
 
 	memcpy(iv, ctx->nonce, 4);
 	memcpy(iv + 4, req->iv, 8);
 
 	sg_init_table(rctx->src, 3);
-	sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+	sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
 	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
 	if (sg != rctx->src + 1)
 		sg_chain(rctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(rctx->dst, 3);
-		sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+		sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
 		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
 		if (sg != rctx->dst + 1)
 			sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 	err = -EINVAL;
 
 	/* Underlying IV size must be 12. */
-	if (crypto_aead_alg_ivsize(alg) != 12)
+	if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
 		goto out_drop_alg;
 
 	/* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
 
-	inst->alg.ivsize = 8;
+	inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
 	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
 		tfm,
 		sizeof(struct crypto_rfc4543_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 12);
+		align + GCM_AES_IV_SIZE);
 
 	return 0;
 
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 	err = -EINVAL;
 
 	/* Underlying IV size must be 12. */
-	if (crypto_aead_alg_ivsize(alg) != 12)
+	if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
 		goto out_drop_alg;
 
 	/* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
 
-	inst->alg.ivsize = 8;
+	inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
 	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index dc012129c063..24e601954c7a 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c | |||
@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x) | |||
156 | x->b = cpu_to_be64((b << 8) ^ _tt); | 156 | x->b = cpu_to_be64((b << 8) ^ _tt); |
157 | } | 157 | } |
158 | 158 | ||
159 | void gf128mul_x8_ble(le128 *r, const le128 *x) | ||
160 | { | ||
161 | u64 a = le64_to_cpu(x->a); | ||
162 | u64 b = le64_to_cpu(x->b); | ||
163 | |||
164 | /* reduce the byte shifted out of the top of 'a': the byte-wide analogue of the single-bit table lookup in gf128mul_x_ble(): */ | ||
165 | u64 _tt = gf128mul_table_be[a >> 56]; | ||
166 | |||
167 | r->a = cpu_to_le64((a << 8) | (b >> 56)); | ||
168 | r->b = cpu_to_le64((b << 8) ^ _tt); | ||
169 | } | ||
170 | EXPORT_SYMBOL(gf128mul_x8_ble); | ||
171 | |||
159 | void gf128mul_lle(be128 *r, const be128 *b) | 172 | void gf128mul_lle(be128 *r, const be128 *b) |
160 | { | 173 | { |
161 | be128 p[8]; | 174 | be128 p[8]; |
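The new gf128mul_x8_ble() gives little-endian users a byte-wide multiply by x^8: one table lookup does the work of eight single-bit gf128mul_x_ble() steps. A hypothetical caller, for example advancing an XTS-style tweak by eight blocks at once:

        #include <crypto/gf128mul.h>

        /* hypothetical helper; in-place is safe because both input
         * words are read before either output word is written */
        static void tweak_mul_x8(le128 *t)
        {
                gf128mul_x8_ble(t, t);
        }
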
diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 72014f963ba7..744e35134c45 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c | |||
@@ -93,18 +93,10 @@ struct crypto_kw_ctx { | |||
93 | 93 | ||
94 | struct crypto_kw_block { | 94 | struct crypto_kw_block { |
95 | #define SEMIBSIZE 8 | 95 | #define SEMIBSIZE 8 |
96 | u8 A[SEMIBSIZE]; | 96 | __be64 A; |
97 | u8 R[SEMIBSIZE]; | 97 | __be64 R; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | /* convert 64 bit integer into its string representation */ | ||
101 | static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf) | ||
102 | { | ||
103 | __be64 *a = (__be64 *)buf; | ||
104 | |||
105 | *a = cpu_to_be64(val); | ||
106 | } | ||
107 | |||
108 | /* | 100 | /* |
109 | * Fast forward the SGL to the "end" length minus SEMIBSIZE. | 101 | * Fast forward the SGL to the "end" length minus SEMIBSIZE. |
110 | * The start in the SGL defined by the fast-forward is returned with | 102 | * The start in the SGL defined by the fast-forward is returned with |
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
139 | struct crypto_blkcipher *tfm = desc->tfm; | 131 | struct crypto_blkcipher *tfm = desc->tfm; |
140 | struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); | 132 | struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); |
141 | struct crypto_cipher *child = ctx->child; | 133 | struct crypto_cipher *child = ctx->child; |
142 | 134 | struct crypto_kw_block block; | |
143 | unsigned long alignmask = max_t(unsigned long, SEMIBSIZE, | ||
144 | crypto_cipher_alignmask(child)); | ||
145 | unsigned int i; | ||
146 | |||
147 | u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask]; | ||
148 | struct crypto_kw_block *block = (struct crypto_kw_block *) | ||
149 | PTR_ALIGN(blockbuf + 0, alignmask + 1); | ||
150 | |||
151 | u64 t = 6 * ((nbytes) >> 3); | ||
152 | struct scatterlist *lsrc, *ldst; | 135 | struct scatterlist *lsrc, *ldst; |
136 | u64 t = 6 * ((nbytes) >> 3); | ||
137 | unsigned int i; | ||
153 | int ret = 0; | 138 | int ret = 0; |
154 | 139 | ||
155 | /* | 140 | /* |
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
160 | return -EINVAL; | 145 | return -EINVAL; |
161 | 146 | ||
162 | /* Place the IV into block A */ | 147 | /* Place the IV into block A */ |
163 | memcpy(block->A, desc->info, SEMIBSIZE); | 148 | memcpy(&block.A, desc->info, SEMIBSIZE); |
164 | 149 | ||
165 | /* | 150 | /* |
166 | * src scatterlist is read-only. dst scatterlist is r/w. During the | 151 | * src scatterlist is read-only. dst scatterlist is r/w. During the |
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
171 | ldst = dst; | 156 | ldst = dst; |
172 | 157 | ||
173 | for (i = 0; i < 6; i++) { | 158 | for (i = 0; i < 6; i++) { |
174 | u8 tbe_buffer[SEMIBSIZE + alignmask]; | ||
175 | /* alignment for the crypto_xor and the _to_be64 operation */ | ||
176 | u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1); | ||
177 | unsigned int tmp_nbytes = nbytes; | ||
178 | struct scatter_walk src_walk, dst_walk; | 159 | struct scatter_walk src_walk, dst_walk; |
160 | unsigned int tmp_nbytes = nbytes; | ||
179 | 161 | ||
180 | while (tmp_nbytes) { | 162 | while (tmp_nbytes) { |
181 | /* move pointer by tmp_nbytes in the SGL */ | 163 | /* move pointer by tmp_nbytes in the SGL */ |
182 | crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes); | 164 | crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes); |
183 | /* get the source block */ | 165 | /* get the source block */ |
184 | scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE, | 166 | scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, |
185 | false); | 167 | false); |
186 | 168 | ||
187 | /* perform KW operation: get counter as byte string */ | ||
188 | crypto_kw_cpu_to_be64(t, tbe); | ||
189 | /* perform KW operation: modify IV with counter */ | 169 | /* perform KW operation: modify IV with counter */ |
190 | crypto_xor(block->A, tbe, SEMIBSIZE); | 170 | block.A ^= cpu_to_be64(t); |
191 | t--; | 171 | t--; |
192 | /* perform KW operation: decrypt block */ | 172 | /* perform KW operation: decrypt block */ |
193 | crypto_cipher_decrypt_one(child, (u8*)block, | 173 | crypto_cipher_decrypt_one(child, (u8*)&block, |
194 | (u8*)block); | 174 | (u8*)&block); |
195 | 175 | ||
196 | /* move pointer by tmp_nbytes in the SGL */ | 176 | /* move pointer by tmp_nbytes in the SGL */ |
197 | crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes); | 177 | crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes); |
198 | /* Copy block->R into place */ | 178 | /* Copy block->R into place */ |
199 | scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE, | 179 | scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, |
200 | true); | 180 | true); |
201 | 181 | ||
202 | tmp_nbytes -= SEMIBSIZE; | 182 | tmp_nbytes -= SEMIBSIZE; |
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
208 | } | 188 | } |
209 | 189 | ||
210 | /* Perform authentication check */ | 190 | /* Perform authentication check */ |
211 | if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A, | 191 | if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6)) |
212 | SEMIBSIZE)) | ||
213 | ret = -EBADMSG; | 192 | ret = -EBADMSG; |
214 | 193 | ||
215 | memzero_explicit(block, sizeof(struct crypto_kw_block)); | 194 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); |
216 | 195 | ||
217 | return ret; | 196 | return ret; |
218 | } | 197 | } |
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
224 | struct crypto_blkcipher *tfm = desc->tfm; | 203 | struct crypto_blkcipher *tfm = desc->tfm; |
225 | struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); | 204 | struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); |
226 | struct crypto_cipher *child = ctx->child; | 205 | struct crypto_cipher *child = ctx->child; |
227 | 206 | struct crypto_kw_block block; | |
228 | unsigned long alignmask = max_t(unsigned long, SEMIBSIZE, | ||
229 | crypto_cipher_alignmask(child)); | ||
230 | unsigned int i; | ||
231 | |||
232 | u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask]; | ||
233 | struct crypto_kw_block *block = (struct crypto_kw_block *) | ||
234 | PTR_ALIGN(blockbuf + 0, alignmask + 1); | ||
235 | |||
236 | u64 t = 1; | ||
237 | struct scatterlist *lsrc, *ldst; | 207 | struct scatterlist *lsrc, *ldst; |
208 | u64 t = 1; | ||
209 | unsigned int i; | ||
238 | 210 | ||
239 | /* | 211 | /* |
240 | * Require at least 2 semiblocks (note, the 3rd semiblock that is | 212 | * Require at least 2 semiblocks (note, the 3rd semiblock that is |
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
249 | * Place the predefined IV into block A -- for encrypt, the caller | 221 | * Place the predefined IV into block A -- for encrypt, the caller |
250 | * does not need to provide an IV, but must fetch the final IV. | 222 |
251 | */ | 223 | */ |
252 | memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE); | 224 | block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6); |
253 | 225 | ||
254 | /* | 226 | /* |
255 | * src scatterlist is read-only. dst scatterlist is r/w. During the | 227 | * src scatterlist is read-only. dst scatterlist is r/w. During the |
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
260 | ldst = dst; | 232 | ldst = dst; |
261 | 233 | ||
262 | for (i = 0; i < 6; i++) { | 234 | for (i = 0; i < 6; i++) { |
263 | u8 tbe_buffer[SEMIBSIZE + alignmask]; | ||
264 | u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1); | ||
265 | unsigned int tmp_nbytes = nbytes; | ||
266 | struct scatter_walk src_walk, dst_walk; | 235 | struct scatter_walk src_walk, dst_walk; |
236 | unsigned int tmp_nbytes = nbytes; | ||
267 | 237 | ||
268 | scatterwalk_start(&src_walk, lsrc); | 238 | scatterwalk_start(&src_walk, lsrc); |
269 | scatterwalk_start(&dst_walk, ldst); | 239 | scatterwalk_start(&dst_walk, ldst); |
270 | 240 | ||
271 | while (tmp_nbytes) { | 241 | while (tmp_nbytes) { |
272 | /* get the source block */ | 242 | /* get the source block */ |
273 | scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE, | 243 | scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, |
274 | false); | 244 | false); |
275 | 245 | ||
276 | /* perform KW operation: encrypt block */ | 246 | /* perform KW operation: encrypt block */ |
277 | crypto_cipher_encrypt_one(child, (u8 *)block, | 247 | crypto_cipher_encrypt_one(child, (u8 *)&block, |
278 | (u8 *)block); | 248 | (u8 *)&block); |
279 | /* perform KW operation: get counter as byte string */ | ||
280 | crypto_kw_cpu_to_be64(t, tbe); | ||
281 | /* perform KW operation: modify IV with counter */ | 249 | /* perform KW operation: modify IV with counter */ |
282 | crypto_xor(block->A, tbe, SEMIBSIZE); | 250 | block.A ^= cpu_to_be64(t); |
283 | t++; | 251 | t++; |
284 | 252 | ||
285 | /* Copy block->R into place */ | 253 | /* Copy block->R into place */ |
286 | scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE, | 254 | scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, |
287 | true); | 255 | true); |
288 | 256 | ||
289 | tmp_nbytes -= SEMIBSIZE; | 257 | tmp_nbytes -= SEMIBSIZE; |
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
295 | } | 263 | } |
296 | 264 | ||
297 | /* establish the IV for the caller to pick up */ | 265 | /* establish the IV for the caller to pick up */ |
298 | memcpy(desc->info, block->A, SEMIBSIZE); | 266 | memcpy(desc->info, &block.A, SEMIBSIZE); |
299 | 267 | ||
300 | memzero_explicit(block, sizeof(struct crypto_kw_block)); | 268 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); |
301 | 269 | ||
302 | return 0; | 270 | return 0; |
303 | } | 271 | } |
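The keywrap rewrite drops the alignment-buffer juggling by keeping the working block on the stack as two __be64 words, so the counter mix becomes a single XOR. The identity it relies on: XORing the 8-byte big-endian encoding of t into A byte by byte, as crypto_xor() used to, is the same as one 64-bit XOR of big-endian values. A user-space demo of that identity (hypothetical, not kernel code):

        #include <assert.h>
        #include <endian.h>
        #include <stdint.h>
        #include <string.h>

        int main(void)
        {
                uint64_t A = htobe64(0xa6a6a6a6a6a6a6a6ULL);     /* like block.A */
                uint64_t t_be = htobe64(6), xored;               /* cpu_to_be64(t) */
                uint8_t bytes[8], tbe[8];
                int i;

                memcpy(bytes, &A, 8);
                memcpy(tbe, &t_be, 8);
                for (i = 0; i < 8; i++)         /* what crypto_xor() did */
                        bytes[i] ^= tbe[i];

                memcpy(&xored, bytes, 8);
                assert(xored == (A ^ t_be));    /* the one-XOR replacement */
                return 0;
        }
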
diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4451bf..cbbd7c50ad19 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c | |||
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err) | |||
328 | crypto_skcipher_encrypt(subreq) ?: | 328 | crypto_skcipher_encrypt(subreq) ?: |
329 | post_crypt(req); | 329 | post_crypt(req); |
330 | 330 | ||
331 | if (err == -EINPROGRESS || | 331 | if (err == -EINPROGRESS || err == -EBUSY) |
332 | (err == -EBUSY && | ||
333 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
334 | return err; | 332 | return err; |
335 | } | 333 | } |
336 | 334 | ||
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err) | |||
380 | crypto_skcipher_decrypt(subreq) ?: | 378 | crypto_skcipher_decrypt(subreq) ?: |
381 | post_crypt(req); | 379 | post_crypt(req); |
382 | 380 | ||
383 | if (err == -EINPROGRESS || | 381 | if (err == -EINPROGRESS || err == -EBUSY) |
384 | (err == -EBUSY && | ||
385 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
386 | return err; | 382 | return err; |
387 | } | 383 | } |
388 | 384 | ||
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) | |||
610 | ecb_name[len - 1] = 0; | 606 | ecb_name[len - 1] = 0; |
611 | 607 | ||
612 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, | 608 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
613 | "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) | 609 | "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { |
614 | return -ENAMETOOLONG; | 610 | err = -ENAMETOOLONG; |
615 | } | 611 | goto err_drop_spawn; |
612 | } | ||
613 | } else | ||
614 | goto err_drop_spawn; | ||
616 | 615 | ||
617 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; | 616 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
618 | inst->alg.base.cra_priority = alg->base.cra_priority; | 617 | inst->alg.base.cra_priority = alg->base.cra_priority; |
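The two-line simplifications in do_encrypt() and do_decrypt() above follow from the EBUSY/ENOSPC disambiguation mentioned in the merge log: a transform queue that is full and was not asked to backlog now fails with -ENOSPC, so an -EBUSY return always means the request was accepted onto the backlog and the completion callback will run. Callers no longer need to inspect CRYPTO_TFM_REQ_MAY_BACKLOG; the same pattern recurs in rsa-pkcs1pad.c below. A sketch of the new contract:

        err = crypto_skcipher_encrypt(subreq);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;     /* async; the completion callback will run */
        /* anything else, -ENOSPC included, is the final status */
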
diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 049486ede938..40e053b97b69 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c | |||
@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in) | |||
213 | state[2] = state[3] + aa + bbb; | 213 | state[2] = state[3] + aa + bbb; |
214 | state[3] = state[0] + bb + ccc; | 214 | state[3] = state[0] + bb + ccc; |
215 | state[0] = ddd; | 215 | state[0] = ddd; |
216 | |||
217 | return; | ||
218 | } | 216 | } |
219 | 217 | ||
220 | static int rmd128_init(struct shash_desc *desc) | 218 | static int rmd128_init(struct shash_desc *desc) |
diff --git a/crypto/rmd160.c b/crypto/rmd160.c index de585e51d455..5f3e6ea35268 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c | |||
@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in) | |||
256 | state[3] = state[4] + aa + bbb; | 256 | state[3] = state[4] + aa + bbb; |
257 | state[4] = state[0] + bb + ccc; | 257 | state[4] = state[0] + bb + ccc; |
258 | state[0] = ddd; | 258 | state[0] = ddd; |
259 | |||
260 | return; | ||
261 | } | 259 | } |
262 | 260 | ||
263 | static int rmd160_init(struct shash_desc *desc) | 261 | static int rmd160_init(struct shash_desc *desc) |
diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 4ec02a754e09..f50c025cc962 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c | |||
@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in) | |||
228 | state[5] += bbb; | 228 | state[5] += bbb; |
229 | state[6] += ccc; | 229 | state[6] += ccc; |
230 | state[7] += ddd; | 230 | state[7] += ddd; |
231 | |||
232 | return; | ||
233 | } | 231 | } |
234 | 232 | ||
235 | static int rmd256_init(struct shash_desc *desc) | 233 | static int rmd256_init(struct shash_desc *desc) |
diff --git a/crypto/rmd320.c b/crypto/rmd320.c index 770f2cb369f8..e1315e4869e8 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c | |||
@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in) | |||
275 | state[7] += ccc; | 275 | state[7] += ccc; |
276 | state[8] += ddd; | 276 | state[8] += ddd; |
277 | state[9] += eee; | 277 | state[9] += eee; |
278 | |||
279 | return; | ||
280 | } | 278 | } |
281 | 279 | ||
282 | static int rmd320_init(struct shash_desc *desc) | 280 | static int rmd320_init(struct shash_desc *desc) |
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 407c64bdcdd9..2908f93c3e55 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c | |||
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) | |||
279 | req->dst, ctx->key_size - 1, req->dst_len); | 279 | req->dst, ctx->key_size - 1, req->dst_len); |
280 | 280 | ||
281 | err = crypto_akcipher_encrypt(&req_ctx->child_req); | 281 | err = crypto_akcipher_encrypt(&req_ctx->child_req); |
282 | if (err != -EINPROGRESS && | 282 | if (err != -EINPROGRESS && err != -EBUSY) |
283 | (err != -EBUSY || | ||
284 | !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
285 | return pkcs1pad_encrypt_sign_complete(req, err); | 283 | return pkcs1pad_encrypt_sign_complete(req, err); |
286 | 284 | ||
287 | return err; | 285 | return err; |
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) | |||
383 | ctx->key_size); | 381 | ctx->key_size); |
384 | 382 | ||
385 | err = crypto_akcipher_decrypt(&req_ctx->child_req); | 383 | err = crypto_akcipher_decrypt(&req_ctx->child_req); |
386 | if (err != -EINPROGRESS && | 384 | if (err != -EINPROGRESS && err != -EBUSY) |
387 | (err != -EBUSY || | ||
388 | !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
389 | return pkcs1pad_decrypt_complete(req, err); | 385 | return pkcs1pad_decrypt_complete(req, err); |
390 | 386 | ||
391 | return err; | 387 | return err; |
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) | |||
440 | req->dst, ctx->key_size - 1, req->dst_len); | 436 | req->dst, ctx->key_size - 1, req->dst_len); |
441 | 437 | ||
442 | err = crypto_akcipher_sign(&req_ctx->child_req); | 438 | err = crypto_akcipher_sign(&req_ctx->child_req); |
443 | if (err != -EINPROGRESS && | 439 | if (err != -EINPROGRESS && err != -EBUSY) |
444 | (err != -EBUSY || | ||
445 | !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
446 | return pkcs1pad_encrypt_sign_complete(req, err); | 440 | return pkcs1pad_encrypt_sign_complete(req, err); |
447 | 441 | ||
448 | return err; | 442 | return err; |
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) | |||
561 | ctx->key_size); | 555 | ctx->key_size); |
562 | 556 | ||
563 | err = crypto_akcipher_verify(&req_ctx->child_req); | 557 | err = crypto_akcipher_verify(&req_ctx->child_req); |
564 | if (err != -EINPROGRESS && | 558 | if (err != -EINPROGRESS && err != -EBUSY) |
565 | (err != -EBUSY || | ||
566 | !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
567 | return pkcs1pad_verify_complete(req, err); | 559 | return pkcs1pad_verify_complete(req, err); |
568 | 560 | ||
569 | return err; | 561 | return err; |
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c new file mode 100644 index 000000000000..9e823d99f095 --- /dev/null +++ b/crypto/sm3_generic.c | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and | ||
3 | * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01 | ||
4 | * | ||
5 | * Copyright (C) 2017 ARM Limited or its affiliates. | ||
6 | * Written by Gilad Ben-Yossef <gilad@benyossef.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <crypto/internal/hash.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <crypto/sm3.h> | ||
27 | #include <crypto/sm3_base.h> | ||
28 | #include <linux/bitops.h> | ||
29 | #include <asm/byteorder.h> | ||
30 | #include <asm/unaligned.h> | ||
31 | |||
32 | const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { | ||
33 | 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, | ||
34 | 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, | ||
35 | 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, | ||
36 | 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B | ||
37 | }; | ||
38 | EXPORT_SYMBOL_GPL(sm3_zero_message_hash); | ||
39 | |||
40 | static inline u32 p0(u32 x) | ||
41 | { | ||
42 | return x ^ rol32(x, 9) ^ rol32(x, 17); | ||
43 | } | ||
44 | |||
45 | static inline u32 p1(u32 x) | ||
46 | { | ||
47 | return x ^ rol32(x, 15) ^ rol32(x, 23); | ||
48 | } | ||
49 | |||
50 | static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) | ||
51 | { | ||
52 | return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); | ||
53 | } | ||
54 | |||
55 | static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g) | ||
56 | { | ||
57 | return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g)); | ||
58 | } | ||
59 | |||
60 | static inline u32 t(unsigned int n) | ||
61 | { | ||
62 | return (n < 16) ? SM3_T1 : SM3_T2; | ||
63 | } | ||
64 | |||
65 | static void sm3_expand(u32 *t, u32 *w, u32 *wt) | ||
66 | { | ||
67 | int i; | ||
68 | unsigned int tmp; | ||
69 | |||
70 | /* load the input */ | ||
71 | for (i = 0; i <= 15; i++) | ||
72 | w[i] = get_unaligned_be32((__u32 *)t + i); | ||
73 | |||
74 | for (i = 16; i <= 67; i++) { | ||
75 | tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15); | ||
76 | w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6]; | ||
77 | } | ||
78 | |||
79 | for (i = 0; i <= 63; i++) | ||
80 | wt[i] = w[i] ^ w[i + 4]; | ||
81 | } | ||
82 | |||
83 | static void sm3_compress(u32 *w, u32 *wt, u32 *m) | ||
84 | { | ||
85 | u32 ss1; | ||
86 | u32 ss2; | ||
87 | u32 tt1; | ||
88 | u32 tt2; | ||
89 | u32 a, b, c, d, e, f, g, h; | ||
90 | int i; | ||
91 | |||
92 | a = m[0]; | ||
93 | b = m[1]; | ||
94 | c = m[2]; | ||
95 | d = m[3]; | ||
96 | e = m[4]; | ||
97 | f = m[5]; | ||
98 | g = m[6]; | ||
99 | h = m[7]; | ||
100 | |||
101 | for (i = 0; i <= 63; i++) { | ||
102 | |||
103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); | ||
104 | |||
105 | ss2 = ss1 ^ rol32(a, 12); | ||
106 | |||
107 | tt1 = ff(i, a, b, c) + d + ss2 + *wt; | ||
108 | wt++; | ||
109 | |||
110 | tt2 = gg(i, e, f, g) + h + ss1 + *w; | ||
111 | w++; | ||
112 | |||
113 | d = c; | ||
114 | c = rol32(b, 9); | ||
115 | b = a; | ||
116 | a = tt1; | ||
117 | h = g; | ||
118 | g = rol32(f, 19); | ||
119 | f = e; | ||
120 | e = p0(tt2); | ||
121 | } | ||
122 | |||
123 | m[0] = a ^ m[0]; | ||
124 | m[1] = b ^ m[1]; | ||
125 | m[2] = c ^ m[2]; | ||
126 | m[3] = d ^ m[3]; | ||
127 | m[4] = e ^ m[4]; | ||
128 | m[5] = f ^ m[5]; | ||
129 | m[6] = g ^ m[6]; | ||
130 | m[7] = h ^ m[7]; | ||
131 | |||
132 | a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; | ||
133 | } | ||
134 | |||
135 | static void sm3_transform(struct sm3_state *sst, u8 const *src) | ||
136 | { | ||
137 | unsigned int w[68]; | ||
138 | unsigned int wt[64]; | ||
139 | |||
140 | sm3_expand((u32 *)src, w, wt); | ||
141 | sm3_compress(w, wt, sst->state); | ||
142 | |||
143 | memzero_explicit(w, sizeof(w)); | ||
144 | memzero_explicit(wt, sizeof(wt)); | ||
145 | } | ||
146 | |||
147 | static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src, | ||
148 | int blocks) | ||
149 | { | ||
150 | while (blocks--) { | ||
151 | sm3_transform(sst, src); | ||
152 | src += SM3_BLOCK_SIZE; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | int crypto_sm3_update(struct shash_desc *desc, const u8 *data, | ||
157 | unsigned int len) | ||
158 | { | ||
159 | return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); | ||
160 | } | ||
161 | EXPORT_SYMBOL(crypto_sm3_update); | ||
162 | |||
163 | static int sm3_final(struct shash_desc *desc, u8 *out) | ||
164 | { | ||
165 | sm3_base_do_finalize(desc, sm3_generic_block_fn); | ||
166 | return sm3_base_finish(desc, out); | ||
167 | } | ||
168 | |||
169 | int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, | ||
170 | unsigned int len, u8 *hash) | ||
171 | { | ||
172 | sm3_base_do_update(desc, data, len, sm3_generic_block_fn); | ||
173 | return sm3_final(desc, hash); | ||
174 | } | ||
175 | EXPORT_SYMBOL(crypto_sm3_finup); | ||
176 | |||
177 | static struct shash_alg sm3_alg = { | ||
178 | .digestsize = SM3_DIGEST_SIZE, | ||
179 | .init = sm3_base_init, | ||
180 | .update = crypto_sm3_update, | ||
181 | .final = sm3_final, | ||
182 | .finup = crypto_sm3_finup, | ||
183 | .descsize = sizeof(struct sm3_state), | ||
184 | .base = { | ||
185 | .cra_name = "sm3", | ||
186 | .cra_driver_name = "sm3-generic", | ||
187 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
188 | .cra_blocksize = SM3_BLOCK_SIZE, | ||
189 | .cra_module = THIS_MODULE, | ||
190 | } | ||
191 | }; | ||
192 | |||
193 | static int __init sm3_generic_mod_init(void) | ||
194 | { | ||
195 | return crypto_register_shash(&sm3_alg); | ||
196 | } | ||
197 | |||
198 | static void __exit sm3_generic_mod_fini(void) | ||
199 | { | ||
200 | crypto_unregister_shash(&sm3_alg); | ||
201 | } | ||
202 | |||
203 | module_init(sm3_generic_mod_init); | ||
204 | module_exit(sm3_generic_mod_fini); | ||
205 | |||
206 | MODULE_LICENSE("GPL v2"); | ||
207 | MODULE_DESCRIPTION("SM3 Secure Hash Algorithm"); | ||
208 | |||
209 | MODULE_ALIAS_CRYPTO("sm3"); | ||
210 | MODULE_ALIAS_CRYPTO("sm3-generic"); | ||
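With the shash registered, any kernel user can compute an SM3 digest through the standard synchronous hash API. A minimal sketch (hypothetical helper, error handling trimmed):

        #include <crypto/hash.h>
        #include <crypto/sm3.h>

        static int sm3_digest_sketch(const u8 *data, unsigned int len,
                                     u8 out[SM3_DIGEST_SIZE])
        {
                struct crypto_shash *tfm = crypto_alloc_shash("sm3", 0, 0);
                int err;

                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        desc->flags = 0;        /* no CRYPTO_TFM_REQ_MAY_SLEEP */
                        err = crypto_shash_digest(desc, data, len, out);
                }

                crypto_free_shash(tfm);
                return err;
        }
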
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18d36ee..9267cbdb14d2 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -70,7 +70,7 @@ static int mode; | |||
70 | static char *tvmem[TVMEMSIZE]; | 70 | static char *tvmem[TVMEMSIZE]; |
71 | 71 | ||
72 | static char *check[] = { | 72 | static char *check[] = { |
73 | "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", | 73 | "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", |
74 | "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", | 74 | "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", |
75 | "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", | 75 | "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", |
76 | "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", | 76 | "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", |
@@ -79,34 +79,11 @@ static char *check[] = { | |||
79 | NULL | 79 | NULL |
80 | }; | 80 | }; |
81 | 81 | ||
82 | struct tcrypt_result { | ||
83 | struct completion completion; | ||
84 | int err; | ||
85 | }; | ||
86 | |||
87 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
88 | { | ||
89 | struct tcrypt_result *res = req->data; | ||
90 | |||
91 | if (err == -EINPROGRESS) | ||
92 | return; | ||
93 | |||
94 | res->err = err; | ||
95 | complete(&res->completion); | ||
96 | } | ||
97 | |||
98 | static inline int do_one_aead_op(struct aead_request *req, int ret) | 82 | static inline int do_one_aead_op(struct aead_request *req, int ret) |
99 | { | 83 | { |
100 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 84 | struct crypto_wait *wait = req->base.data; |
101 | struct tcrypt_result *tr = req->base.data; | ||
102 | 85 | ||
103 | ret = wait_for_completion_interruptible(&tr->completion); | 86 | return crypto_wait_req(ret, wait); |
104 | if (!ret) | ||
105 | ret = tr->err; | ||
106 | reinit_completion(&tr->completion); | ||
107 | } | ||
108 | |||
109 | return ret; | ||
110 | } | 87 | } |
111 | 88 | ||
112 | static int test_aead_jiffies(struct aead_request *req, int enc, | 89 | static int test_aead_jiffies(struct aead_request *req, int enc, |
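All of tcrypt's do_one_*_op() helpers collapse into the same shape: the request callback is crypto_req_done() recording into a struct crypto_wait, and crypto_wait_req() passes a synchronous result straight through while sleeping on the completion only for -EINPROGRESS or -EBUSY. The pairing, sketched for a hash request:

        struct crypto_wait wait;

        crypto_init_wait(&wait);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        err = crypto_wait_req(crypto_ahash_digest(req), &wait);
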
@@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
248 | char *axbuf[XBUFSIZE]; | 225 | char *axbuf[XBUFSIZE]; |
249 | unsigned int *b_size; | 226 | unsigned int *b_size; |
250 | unsigned int iv_len; | 227 | unsigned int iv_len; |
251 | struct tcrypt_result result; | 228 | struct crypto_wait wait; |
252 | 229 | ||
253 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); | 230 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); |
254 | if (!iv) | 231 | if (!iv) |
@@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
284 | goto out_notfm; | 261 | goto out_notfm; |
285 | } | 262 | } |
286 | 263 | ||
287 | init_completion(&result.completion); | 264 | crypto_init_wait(&wait); |
288 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, | 265 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, |
289 | get_driver_name(crypto_aead, tfm), e); | 266 | get_driver_name(crypto_aead, tfm), e); |
290 | 267 | ||
@@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
296 | } | 273 | } |
297 | 274 | ||
298 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 275 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
299 | tcrypt_complete, &result); | 276 | crypto_req_done, &wait); |
300 | 277 | ||
301 | i = 0; | 278 | i = 0; |
302 | do { | 279 | do { |
@@ -340,7 +317,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
340 | } | 317 | } |
341 | 318 | ||
342 | sg_init_aead(sg, xbuf, | 319 | sg_init_aead(sg, xbuf, |
343 | *b_size + (enc ? authsize : 0)); | 320 | *b_size + (enc ? 0 : authsize)); |
344 | 321 | ||
345 | sg_init_aead(sgout, xoutbuf, | 322 | sg_init_aead(sgout, xoutbuf, |
346 | *b_size + (enc ? authsize : 0)); | 323 | *b_size + (enc ? authsize : 0)); |
@@ -348,7 +325,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
348 | sg_set_buf(&sg[0], assoc, aad_size); | 325 | sg_set_buf(&sg[0], assoc, aad_size); |
349 | sg_set_buf(&sgout[0], assoc, aad_size); | 326 | sg_set_buf(&sgout[0], assoc, aad_size); |
350 | 327 | ||
351 | aead_request_set_crypt(req, sg, sgout, *b_size, iv); | 328 | aead_request_set_crypt(req, sg, sgout, |
329 | *b_size + (enc ? 0 : authsize), | ||
330 | iv); | ||
352 | aead_request_set_ad(req, aad_size); | 331 | aead_request_set_ad(req, aad_size); |
353 | 332 | ||
354 | if (secs) | 333 | if (secs) |
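The two changes above fix the AEAD length accounting in the speed test: the authentication tag travels with the ciphertext, so the source buffer and the request length must include authsize on decryption, while only the destination needs room for it on encryption. In short:

        srclen = *b_size + (enc ? 0 : authsize);  /* set_crypt() length   */
        dstlen = *b_size + (enc ? authsize : 0);  /* room sgout must hold */
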
@@ -381,7 +360,6 @@ out_noaxbuf: | |||
381 | testmgr_free_buf(xbuf); | 360 | testmgr_free_buf(xbuf); |
382 | out_noxbuf: | 361 | out_noxbuf: |
383 | kfree(iv); | 362 | kfree(iv); |
384 | return; | ||
385 | } | 363 | } |
386 | 364 | ||
387 | static void test_hash_sg_init(struct scatterlist *sg) | 365 | static void test_hash_sg_init(struct scatterlist *sg) |
@@ -397,21 +375,16 @@ static void test_hash_sg_init(struct scatterlist *sg) | |||
397 | 375 | ||
398 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) | 376 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) |
399 | { | 377 | { |
400 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 378 | struct crypto_wait *wait = req->base.data; |
401 | struct tcrypt_result *tr = req->base.data; | ||
402 | 379 | ||
403 | wait_for_completion(&tr->completion); | 380 | return crypto_wait_req(ret, wait); |
404 | reinit_completion(&tr->completion); | ||
405 | ret = tr->err; | ||
406 | } | ||
407 | return ret; | ||
408 | } | 381 | } |
409 | 382 | ||
410 | struct test_mb_ahash_data { | 383 | struct test_mb_ahash_data { |
411 | struct scatterlist sg[TVMEMSIZE]; | 384 | struct scatterlist sg[TVMEMSIZE]; |
412 | char result[64]; | 385 | char result[64]; |
413 | struct ahash_request *req; | 386 | struct ahash_request *req; |
414 | struct tcrypt_result tresult; | 387 | struct crypto_wait wait; |
415 | char *xbuf[XBUFSIZE]; | 388 | char *xbuf[XBUFSIZE]; |
416 | }; | 389 | }; |
417 | 390 | ||
@@ -440,7 +413,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
440 | if (testmgr_alloc_buf(data[i].xbuf)) | 413 | if (testmgr_alloc_buf(data[i].xbuf)) |
441 | goto out; | 414 | goto out; |
442 | 415 | ||
443 | init_completion(&data[i].tresult.completion); | 416 | crypto_init_wait(&data[i].wait); |
444 | 417 | ||
445 | data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); | 418 | data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); |
446 | if (!data[i].req) { | 419 | if (!data[i].req) { |
@@ -449,8 +422,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
449 | goto out; | 422 | goto out; |
450 | } | 423 | } |
451 | 424 | ||
452 | ahash_request_set_callback(data[i].req, 0, | 425 | ahash_request_set_callback(data[i].req, 0, crypto_req_done, |
453 | tcrypt_complete, &data[i].tresult); | 426 | &data[i].wait); |
454 | test_hash_sg_init(data[i].sg); | 427 | test_hash_sg_init(data[i].sg); |
455 | } | 428 | } |
456 | 429 | ||
@@ -492,16 +465,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
492 | if (ret) | 465 | if (ret) |
493 | break; | 466 | break; |
494 | 467 | ||
495 | complete(&data[k].tresult.completion); | 468 | crypto_req_done(&data[k].req->base, 0); |
496 | data[k].tresult.err = 0; | ||
497 | } | 469 | } |
498 | 470 | ||
499 | for (j = 0; j < k; j++) { | 471 | for (j = 0; j < k; j++) { |
500 | struct tcrypt_result *tr = &data[j].tresult; | 472 | struct crypto_wait *wait = &data[j].wait; |
473 | int wait_ret; | ||
501 | 474 | ||
502 | wait_for_completion(&tr->completion); | 475 | wait_ret = crypto_wait_req(-EINPROGRESS, wait); |
503 | if (tr->err) | 476 | if (wait_ret) |
504 | ret = tr->err; | 477 | ret = wait_ret; |
505 | } | 478 | } |
506 | 479 | ||
507 | end = get_cycles(); | 480 | end = get_cycles(); |
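The drain loop at the end of the batch reuses the same helper with a deliberately fake status: passing -EINPROGRESS makes crypto_wait_req() block unconditionally until that slot's crypto_req_done() has run, while the explicit crypto_req_done(&data[k].req->base, 0) call just above pre-completes requests that finished synchronously, so their wait returns immediately with status 0.
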
@@ -679,7 +652,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, | |||
679 | struct hash_speed *speed, unsigned mask) | 652 | struct hash_speed *speed, unsigned mask) |
680 | { | 653 | { |
681 | struct scatterlist sg[TVMEMSIZE]; | 654 | struct scatterlist sg[TVMEMSIZE]; |
682 | struct tcrypt_result tresult; | 655 | struct crypto_wait wait; |
683 | struct ahash_request *req; | 656 | struct ahash_request *req; |
684 | struct crypto_ahash *tfm; | 657 | struct crypto_ahash *tfm; |
685 | char *output; | 658 | char *output; |
@@ -708,9 +681,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, | |||
708 | goto out; | 681 | goto out; |
709 | } | 682 | } |
710 | 683 | ||
711 | init_completion(&tresult.completion); | 684 | crypto_init_wait(&wait); |
712 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 685 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
713 | tcrypt_complete, &tresult); | 686 | crypto_req_done, &wait); |
714 | 687 | ||
715 | output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); | 688 | output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); |
716 | if (!output) | 689 | if (!output) |
@@ -765,15 +738,9 @@ static void test_hash_speed(const char *algo, unsigned int secs, | |||
765 | 738 | ||
766 | static inline int do_one_acipher_op(struct skcipher_request *req, int ret) | 739 | static inline int do_one_acipher_op(struct skcipher_request *req, int ret) |
767 | { | 740 | { |
768 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 741 | struct crypto_wait *wait = req->base.data; |
769 | struct tcrypt_result *tr = req->base.data; | ||
770 | |||
771 | wait_for_completion(&tr->completion); | ||
772 | reinit_completion(&tr->completion); | ||
773 | ret = tr->err; | ||
774 | } | ||
775 | 742 | ||
776 | return ret; | 743 | return crypto_wait_req(ret, wait); |
777 | } | 744 | } |
778 | 745 | ||
779 | static int test_acipher_jiffies(struct skcipher_request *req, int enc, | 746 | static int test_acipher_jiffies(struct skcipher_request *req, int enc, |
@@ -853,7 +820,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, | |||
853 | unsigned int tcount, u8 *keysize, bool async) | 820 | unsigned int tcount, u8 *keysize, bool async) |
854 | { | 821 | { |
855 | unsigned int ret, i, j, k, iv_len; | 822 | unsigned int ret, i, j, k, iv_len; |
856 | struct tcrypt_result tresult; | 823 | struct crypto_wait wait; |
857 | const char *key; | 824 | const char *key; |
858 | char iv[128]; | 825 | char iv[128]; |
859 | struct skcipher_request *req; | 826 | struct skcipher_request *req; |
@@ -866,7 +833,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, | |||
866 | else | 833 | else |
867 | e = "decryption"; | 834 | e = "decryption"; |
868 | 835 | ||
869 | init_completion(&tresult.completion); | 836 | crypto_init_wait(&wait); |
870 | 837 | ||
871 | tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); | 838 | tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); |
872 | 839 | ||
@@ -887,7 +854,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, | |||
887 | } | 854 | } |
888 | 855 | ||
889 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 856 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
890 | tcrypt_complete, &tresult); | 857 | crypto_req_done, &wait); |
891 | 858 | ||
892 | i = 0; | 859 | i = 0; |
893 | do { | 860 | do { |
@@ -1269,6 +1236,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1269 | ret += tcrypt_test("sha3-512"); | 1236 | ret += tcrypt_test("sha3-512"); |
1270 | break; | 1237 | break; |
1271 | 1238 | ||
1239 | case 52: | ||
1240 | ret += tcrypt_test("sm3"); | ||
1241 | break; | ||
1242 | |||
1272 | case 100: | 1243 | case 100: |
1273 | ret += tcrypt_test("hmac(md5)"); | 1244 | ret += tcrypt_test("hmac(md5)"); |
1274 | break; | 1245 | break; |
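With this hook the new algorithm can be exercised from user space: "modprobe tcrypt mode=52" runs the sm3 test vectors, and the speed hooks added further down ("mode=326 sec=1" for the hash speed test, "mode=425 sec=1" for the multi-buffer variant) measure throughput, assuming the usual tcrypt module parameters.
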
@@ -1603,115 +1574,116 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1603 | speed_template_32); | 1574 | speed_template_32); |
1604 | break; | 1575 | break; |
1605 | 1576 | ||
1606 | |||
1607 | case 300: | 1577 | case 300: |
1608 | if (alg) { | 1578 | if (alg) { |
1609 | test_hash_speed(alg, sec, generic_hash_speed_template); | 1579 | test_hash_speed(alg, sec, generic_hash_speed_template); |
1610 | break; | 1580 | break; |
1611 | } | 1581 | } |
1612 | |||
1613 | /* fall through */ | 1582 | /* fall through */ |
1614 | |||
1615 | case 301: | 1583 | case 301: |
1616 | test_hash_speed("md4", sec, generic_hash_speed_template); | 1584 | test_hash_speed("md4", sec, generic_hash_speed_template); |
1617 | if (mode > 300 && mode < 400) break; | 1585 | if (mode > 300 && mode < 400) break; |
1618 | 1586 | /* fall through */ | |
1619 | case 302: | 1587 | case 302: |
1620 | test_hash_speed("md5", sec, generic_hash_speed_template); | 1588 | test_hash_speed("md5", sec, generic_hash_speed_template); |
1621 | if (mode > 300 && mode < 400) break; | 1589 | if (mode > 300 && mode < 400) break; |
1622 | 1590 | /* fall through */ | |
1623 | case 303: | 1591 | case 303: |
1624 | test_hash_speed("sha1", sec, generic_hash_speed_template); | 1592 | test_hash_speed("sha1", sec, generic_hash_speed_template); |
1625 | if (mode > 300 && mode < 400) break; | 1593 | if (mode > 300 && mode < 400) break; |
1626 | 1594 | /* fall through */ | |
1627 | case 304: | 1595 | case 304: |
1628 | test_hash_speed("sha256", sec, generic_hash_speed_template); | 1596 | test_hash_speed("sha256", sec, generic_hash_speed_template); |
1629 | if (mode > 300 && mode < 400) break; | 1597 | if (mode > 300 && mode < 400) break; |
1630 | 1598 | /* fall through */ | |
1631 | case 305: | 1599 | case 305: |
1632 | test_hash_speed("sha384", sec, generic_hash_speed_template); | 1600 | test_hash_speed("sha384", sec, generic_hash_speed_template); |
1633 | if (mode > 300 && mode < 400) break; | 1601 | if (mode > 300 && mode < 400) break; |
1634 | 1602 | /* fall through */ | |
1635 | case 306: | 1603 | case 306: |
1636 | test_hash_speed("sha512", sec, generic_hash_speed_template); | 1604 | test_hash_speed("sha512", sec, generic_hash_speed_template); |
1637 | if (mode > 300 && mode < 400) break; | 1605 | if (mode > 300 && mode < 400) break; |
1638 | 1606 | /* fall through */ | |
1639 | case 307: | 1607 | case 307: |
1640 | test_hash_speed("wp256", sec, generic_hash_speed_template); | 1608 | test_hash_speed("wp256", sec, generic_hash_speed_template); |
1641 | if (mode > 300 && mode < 400) break; | 1609 | if (mode > 300 && mode < 400) break; |
1642 | 1610 | /* fall through */ | |
1643 | case 308: | 1611 | case 308: |
1644 | test_hash_speed("wp384", sec, generic_hash_speed_template); | 1612 | test_hash_speed("wp384", sec, generic_hash_speed_template); |
1645 | if (mode > 300 && mode < 400) break; | 1613 | if (mode > 300 && mode < 400) break; |
1646 | 1614 | /* fall through */ | |
1647 | case 309: | 1615 | case 309: |
1648 | test_hash_speed("wp512", sec, generic_hash_speed_template); | 1616 | test_hash_speed("wp512", sec, generic_hash_speed_template); |
1649 | if (mode > 300 && mode < 400) break; | 1617 | if (mode > 300 && mode < 400) break; |
1650 | 1618 | /* fall through */ | |
1651 | case 310: | 1619 | case 310: |
1652 | test_hash_speed("tgr128", sec, generic_hash_speed_template); | 1620 | test_hash_speed("tgr128", sec, generic_hash_speed_template); |
1653 | if (mode > 300 && mode < 400) break; | 1621 | if (mode > 300 && mode < 400) break; |
1654 | 1622 | /* fall through */ | |
1655 | case 311: | 1623 | case 311: |
1656 | test_hash_speed("tgr160", sec, generic_hash_speed_template); | 1624 | test_hash_speed("tgr160", sec, generic_hash_speed_template); |
1657 | if (mode > 300 && mode < 400) break; | 1625 | if (mode > 300 && mode < 400) break; |
1658 | 1626 | /* fall through */ | |
1659 | case 312: | 1627 | case 312: |
1660 | test_hash_speed("tgr192", sec, generic_hash_speed_template); | 1628 | test_hash_speed("tgr192", sec, generic_hash_speed_template); |
1661 | if (mode > 300 && mode < 400) break; | 1629 | if (mode > 300 && mode < 400) break; |
1662 | 1630 | /* fall through */ | |
1663 | case 313: | 1631 | case 313: |
1664 | test_hash_speed("sha224", sec, generic_hash_speed_template); | 1632 | test_hash_speed("sha224", sec, generic_hash_speed_template); |
1665 | if (mode > 300 && mode < 400) break; | 1633 | if (mode > 300 && mode < 400) break; |
1666 | 1634 | /* fall through */ | |
1667 | case 314: | 1635 | case 314: |
1668 | test_hash_speed("rmd128", sec, generic_hash_speed_template); | 1636 | test_hash_speed("rmd128", sec, generic_hash_speed_template); |
1669 | if (mode > 300 && mode < 400) break; | 1637 | if (mode > 300 && mode < 400) break; |
1670 | 1638 | /* fall through */ | |
1671 | case 315: | 1639 | case 315: |
1672 | test_hash_speed("rmd160", sec, generic_hash_speed_template); | 1640 | test_hash_speed("rmd160", sec, generic_hash_speed_template); |
1673 | if (mode > 300 && mode < 400) break; | 1641 | if (mode > 300 && mode < 400) break; |
1674 | 1642 | /* fall through */ | |
1675 | case 316: | 1643 | case 316: |
1676 | test_hash_speed("rmd256", sec, generic_hash_speed_template); | 1644 | test_hash_speed("rmd256", sec, generic_hash_speed_template); |
1677 | if (mode > 300 && mode < 400) break; | 1645 | if (mode > 300 && mode < 400) break; |
1678 | 1646 | /* fall through */ | |
1679 | case 317: | 1647 | case 317: |
1680 | test_hash_speed("rmd320", sec, generic_hash_speed_template); | 1648 | test_hash_speed("rmd320", sec, generic_hash_speed_template); |
1681 | if (mode > 300 && mode < 400) break; | 1649 | if (mode > 300 && mode < 400) break; |
1682 | 1650 | /* fall through */ | |
1683 | case 318: | 1651 | case 318: |
1684 | test_hash_speed("ghash-generic", sec, hash_speed_template_16); | 1652 | test_hash_speed("ghash-generic", sec, hash_speed_template_16); |
1685 | if (mode > 300 && mode < 400) break; | 1653 | if (mode > 300 && mode < 400) break; |
1686 | 1654 | /* fall through */ | |
1687 | case 319: | 1655 | case 319: |
1688 | test_hash_speed("crc32c", sec, generic_hash_speed_template); | 1656 | test_hash_speed("crc32c", sec, generic_hash_speed_template); |
1689 | if (mode > 300 && mode < 400) break; | 1657 | if (mode > 300 && mode < 400) break; |
1690 | 1658 | /* fall through */ | |
1691 | case 320: | 1659 | case 320: |
1692 | test_hash_speed("crct10dif", sec, generic_hash_speed_template); | 1660 | test_hash_speed("crct10dif", sec, generic_hash_speed_template); |
1693 | if (mode > 300 && mode < 400) break; | 1661 | if (mode > 300 && mode < 400) break; |
1694 | 1662 | /* fall through */ | |
1695 | case 321: | 1663 | case 321: |
1696 | test_hash_speed("poly1305", sec, poly1305_speed_template); | 1664 | test_hash_speed("poly1305", sec, poly1305_speed_template); |
1697 | if (mode > 300 && mode < 400) break; | 1665 | if (mode > 300 && mode < 400) break; |
1698 | 1666 | /* fall through */ | |
1699 | case 322: | 1667 | case 322: |
1700 | test_hash_speed("sha3-224", sec, generic_hash_speed_template); | 1668 | test_hash_speed("sha3-224", sec, generic_hash_speed_template); |
1701 | if (mode > 300 && mode < 400) break; | 1669 | if (mode > 300 && mode < 400) break; |
1702 | 1670 | /* fall through */ | |
1703 | case 323: | 1671 | case 323: |
1704 | test_hash_speed("sha3-256", sec, generic_hash_speed_template); | 1672 | test_hash_speed("sha3-256", sec, generic_hash_speed_template); |
1705 | if (mode > 300 && mode < 400) break; | 1673 | if (mode > 300 && mode < 400) break; |
1706 | 1674 | /* fall through */ | |
1707 | case 324: | 1675 | case 324: |
1708 | test_hash_speed("sha3-384", sec, generic_hash_speed_template); | 1676 | test_hash_speed("sha3-384", sec, generic_hash_speed_template); |
1709 | if (mode > 300 && mode < 400) break; | 1677 | if (mode > 300 && mode < 400) break; |
1710 | 1678 | /* fall through */ | |
1711 | case 325: | 1679 | case 325: |
1712 | test_hash_speed("sha3-512", sec, generic_hash_speed_template); | 1680 | test_hash_speed("sha3-512", sec, generic_hash_speed_template); |
1713 | if (mode > 300 && mode < 400) break; | 1681 | if (mode > 300 && mode < 400) break; |
1714 | 1682 | /* fall through */ | |
1683 | case 326: | ||
1684 | test_hash_speed("sm3", sec, generic_hash_speed_template); | ||
1685 | if (mode > 300 && mode < 400) break; | ||
1686 | /* fall through */ | ||
1715 | case 399: | 1687 | case 399: |
1716 | break; | 1688 | break; |
1717 | 1689 | ||
@@ -1720,106 +1692,107 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1720 | test_ahash_speed(alg, sec, generic_hash_speed_template); | 1692 | test_ahash_speed(alg, sec, generic_hash_speed_template); |
1721 | break; | 1693 | break; |
1722 | } | 1694 | } |
1723 | |||
1724 | /* fall through */ | 1695 | /* fall through */ |
1725 | |||
1726 | case 401: | 1696 | case 401: |
1727 | test_ahash_speed("md4", sec, generic_hash_speed_template); | 1697 | test_ahash_speed("md4", sec, generic_hash_speed_template); |
1728 | if (mode > 400 && mode < 500) break; | 1698 | if (mode > 400 && mode < 500) break; |
1729 | 1699 | /* fall through */ | |
1730 | case 402: | 1700 | case 402: |
1731 | test_ahash_speed("md5", sec, generic_hash_speed_template); | 1701 | test_ahash_speed("md5", sec, generic_hash_speed_template); |
1732 | if (mode > 400 && mode < 500) break; | 1702 | if (mode > 400 && mode < 500) break; |
1733 | 1703 | /* fall through */ | |
1734 | case 403: | 1704 | case 403: |
1735 | test_ahash_speed("sha1", sec, generic_hash_speed_template); | 1705 | test_ahash_speed("sha1", sec, generic_hash_speed_template); |
1736 | if (mode > 400 && mode < 500) break; | 1706 | if (mode > 400 && mode < 500) break; |
1737 | 1707 | /* fall through */ | |
1738 | case 404: | 1708 | case 404: |
1739 | test_ahash_speed("sha256", sec, generic_hash_speed_template); | 1709 | test_ahash_speed("sha256", sec, generic_hash_speed_template); |
1740 | if (mode > 400 && mode < 500) break; | 1710 | if (mode > 400 && mode < 500) break; |
1741 | 1711 | /* fall through */ | |
1742 | case 405: | 1712 | case 405: |
1743 | test_ahash_speed("sha384", sec, generic_hash_speed_template); | 1713 | test_ahash_speed("sha384", sec, generic_hash_speed_template); |
1744 | if (mode > 400 && mode < 500) break; | 1714 | if (mode > 400 && mode < 500) break; |
1745 | 1715 | /* fall through */ | |
1746 | case 406: | 1716 | case 406: |
1747 | test_ahash_speed("sha512", sec, generic_hash_speed_template); | 1717 | test_ahash_speed("sha512", sec, generic_hash_speed_template); |
1748 | if (mode > 400 && mode < 500) break; | 1718 | if (mode > 400 && mode < 500) break; |
1749 | 1719 | /* fall through */ | |
1750 | case 407: | 1720 | case 407: |
1751 | test_ahash_speed("wp256", sec, generic_hash_speed_template); | 1721 | test_ahash_speed("wp256", sec, generic_hash_speed_template); |
1752 | if (mode > 400 && mode < 500) break; | 1722 | if (mode > 400 && mode < 500) break; |
1753 | 1723 | /* fall through */ | |
1754 | case 408: | 1724 | case 408: |
1755 | test_ahash_speed("wp384", sec, generic_hash_speed_template); | 1725 | test_ahash_speed("wp384", sec, generic_hash_speed_template); |
1756 | if (mode > 400 && mode < 500) break; | 1726 | if (mode > 400 && mode < 500) break; |
1757 | 1727 | /* fall through */ | |
1758 | case 409: | 1728 | case 409: |
1759 | test_ahash_speed("wp512", sec, generic_hash_speed_template); | 1729 | test_ahash_speed("wp512", sec, generic_hash_speed_template); |
1760 | if (mode > 400 && mode < 500) break; | 1730 | if (mode > 400 && mode < 500) break; |
1761 | 1731 | /* fall through */ | |
1762 | case 410: | 1732 | case 410: |
1763 | test_ahash_speed("tgr128", sec, generic_hash_speed_template); | 1733 | test_ahash_speed("tgr128", sec, generic_hash_speed_template); |
1764 | if (mode > 400 && mode < 500) break; | 1734 | if (mode > 400 && mode < 500) break; |
1765 | 1735 | /* fall through */ | |
1766 | case 411: | 1736 | case 411: |
1767 | test_ahash_speed("tgr160", sec, generic_hash_speed_template); | 1737 | test_ahash_speed("tgr160", sec, generic_hash_speed_template); |
1768 | if (mode > 400 && mode < 500) break; | 1738 | if (mode > 400 && mode < 500) break; |
1769 | 1739 | /* fall through */ | |
1770 | case 412: | 1740 | case 412: |
1771 | test_ahash_speed("tgr192", sec, generic_hash_speed_template); | 1741 | test_ahash_speed("tgr192", sec, generic_hash_speed_template); |
1772 | if (mode > 400 && mode < 500) break; | 1742 | if (mode > 400 && mode < 500) break; |
1773 | 1743 | /* fall through */ | |
1774 | case 413: | 1744 | case 413: |
1775 | test_ahash_speed("sha224", sec, generic_hash_speed_template); | 1745 | test_ahash_speed("sha224", sec, generic_hash_speed_template); |
1776 | if (mode > 400 && mode < 500) break; | 1746 | if (mode > 400 && mode < 500) break; |
1777 | 1747 | /* fall through */ | |
1778 | case 414: | 1748 | case 414: |
1779 | test_ahash_speed("rmd128", sec, generic_hash_speed_template); | 1749 | test_ahash_speed("rmd128", sec, generic_hash_speed_template); |
1780 | if (mode > 400 && mode < 500) break; | 1750 | if (mode > 400 && mode < 500) break; |
1781 | 1751 | /* fall through */ | |
1782 | case 415: | 1752 | case 415: |
1783 | test_ahash_speed("rmd160", sec, generic_hash_speed_template); | 1753 | test_ahash_speed("rmd160", sec, generic_hash_speed_template); |
1784 | if (mode > 400 && mode < 500) break; | 1754 | if (mode > 400 && mode < 500) break; |
1785 | 1755 | /* fall through */ | |
1786 | case 416: | 1756 | case 416: |
1787 | test_ahash_speed("rmd256", sec, generic_hash_speed_template); | 1757 | test_ahash_speed("rmd256", sec, generic_hash_speed_template); |
1788 | if (mode > 400 && mode < 500) break; | 1758 | if (mode > 400 && mode < 500) break; |
1789 | 1759 | /* fall through */ | |
1790 | case 417: | 1760 | case 417: |
1791 | test_ahash_speed("rmd320", sec, generic_hash_speed_template); | 1761 | test_ahash_speed("rmd320", sec, generic_hash_speed_template); |
1792 | if (mode > 400 && mode < 500) break; | 1762 | if (mode > 400 && mode < 500) break; |
1793 | 1763 | /* fall through */ | |
1794 | case 418: | 1764 | case 418: |
1795 | test_ahash_speed("sha3-224", sec, generic_hash_speed_template); | 1765 | test_ahash_speed("sha3-224", sec, generic_hash_speed_template); |
1796 | if (mode > 400 && mode < 500) break; | 1766 | if (mode > 400 && mode < 500) break; |
1797 | 1767 | /* fall through */ | |
1798 | case 419: | 1768 | case 419: |
1799 | test_ahash_speed("sha3-256", sec, generic_hash_speed_template); | 1769 | test_ahash_speed("sha3-256", sec, generic_hash_speed_template); |
1800 | if (mode > 400 && mode < 500) break; | 1770 | if (mode > 400 && mode < 500) break; |
1801 | 1771 | /* fall through */ | |
1802 | case 420: | 1772 | case 420: |
1803 | test_ahash_speed("sha3-384", sec, generic_hash_speed_template); | 1773 | test_ahash_speed("sha3-384", sec, generic_hash_speed_template); |
1804 | if (mode > 400 && mode < 500) break; | 1774 | if (mode > 400 && mode < 500) break; |
1805 | 1775 | /* fall through */ | |
1806 | |||
1807 | case 421: | 1776 | case 421: |
1808 | test_ahash_speed("sha3-512", sec, generic_hash_speed_template); | 1777 | test_ahash_speed("sha3-512", sec, generic_hash_speed_template); |
1809 | if (mode > 400 && mode < 500) break; | 1778 | if (mode > 400 && mode < 500) break; |
1810 | 1779 | /* fall through */ | |
1811 | case 422: | 1780 | case 422: |
1812 | test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); | 1781 | test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); |
1813 | if (mode > 400 && mode < 500) break; | 1782 | if (mode > 400 && mode < 500) break; |
1814 | 1783 | /* fall through */ | |
1815 | case 423: | 1784 | case 423: |
1816 | test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); | 1785 | test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); |
1817 | if (mode > 400 && mode < 500) break; | 1786 | if (mode > 400 && mode < 500) break; |
1818 | 1787 | /* fall through */ | |
1819 | case 424: | 1788 | case 424: |
1820 | test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); | 1789 | test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); |
1821 | if (mode > 400 && mode < 500) break; | 1790 | if (mode > 400 && mode < 500) break; |
1822 | 1791 | /* fall through */ | |
1792 | case 425: | ||
1793 | test_mb_ahash_speed("sm3", sec, generic_hash_speed_template); | ||
1794 | if (mode > 400 && mode < 500) break; | ||
1795 | /* fall through */ | ||
1823 | case 499: | 1796 | case 499: |
1824 | break; | 1797 | break; |
1825 | 1798 | ||
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 7125ba3880af..29d7020b8826 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
76 | #define ENCRYPT 1 | 76 | #define ENCRYPT 1 |
77 | #define DECRYPT 0 | 77 | #define DECRYPT 0 |
78 | 78 | ||
79 | struct tcrypt_result { | ||
80 | struct completion completion; | ||
81 | int err; | ||
82 | }; | ||
83 | |||
84 | struct aead_test_suite { | 79 | struct aead_test_suite { |
85 | struct { | 80 | struct { |
86 | const struct aead_testvec *vecs; | 81 | const struct aead_testvec *vecs; |
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len) | |||
155 | buf, len, false); | 150 | buf, len, false); |
156 | } | 151 | } |
157 | 152 | ||
158 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
159 | { | ||
160 | struct tcrypt_result *res = req->data; | ||
161 | |||
162 | if (err == -EINPROGRESS) | ||
163 | return; | ||
164 | |||
165 | res->err = err; | ||
166 | complete(&res->completion); | ||
167 | } | ||
168 | |||
169 | static int testmgr_alloc_buf(char *buf[XBUFSIZE]) | 153 | static int testmgr_alloc_buf(char *buf[XBUFSIZE]) |
170 | { | 154 | { |
171 | int i; | 155 | int i; |
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
193 | free_page((unsigned long)buf[i]); | 177 | free_page((unsigned long)buf[i]); |
194 | } | 178 | } |
195 | 179 | ||
196 | static int wait_async_op(struct tcrypt_result *tr, int ret) | ||
197 | { | ||
198 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
199 | wait_for_completion(&tr->completion); | ||
200 | reinit_completion(&tr->completion); | ||
201 | ret = tr->err; | ||
202 | } | ||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | static int ahash_partial_update(struct ahash_request **preq, | 180 | static int ahash_partial_update(struct ahash_request **preq, |
207 | struct crypto_ahash *tfm, const struct hash_testvec *template, | 181 | struct crypto_ahash *tfm, const struct hash_testvec *template, |
208 | void *hash_buff, int k, int temp, struct scatterlist *sg, | 182 | void *hash_buff, int k, int temp, struct scatterlist *sg, |
209 | const char *algo, char *result, struct tcrypt_result *tresult) | 183 | const char *algo, char *result, struct crypto_wait *wait) |
210 | { | 184 | { |
211 | char *state; | 185 | char *state; |
212 | struct ahash_request *req; | 186 | struct ahash_request *req; |
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
236 | } | 210 | } |
237 | ahash_request_set_callback(req, | 211 | ahash_request_set_callback(req, |
238 | CRYPTO_TFM_REQ_MAY_BACKLOG, | 212 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
239 | tcrypt_complete, tresult); | 213 | crypto_req_done, wait); |
240 | 214 | ||
241 | memcpy(hash_buff, template->plaintext + temp, | 215 | memcpy(hash_buff, template->plaintext + temp, |
242 | template->tap[k]); | 216 | template->tap[k]); |
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
247 | pr_err("alg: hash: Failed to import() for %s\n", algo); | 221 | pr_err("alg: hash: Failed to import() for %s\n", algo); |
248 | goto out; | 222 | goto out; |
249 | } | 223 | } |
250 | ret = wait_async_op(tresult, crypto_ahash_update(req)); | 224 | ret = crypto_wait_req(crypto_ahash_update(req), wait); |
251 | if (ret) | 225 | if (ret) |
252 | goto out; | 226 | goto out; |
253 | *preq = req; | 227 | *preq = req; |
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
272 | char *result; | 246 | char *result; |
273 | char *key; | 247 | char *key; |
274 | struct ahash_request *req; | 248 | struct ahash_request *req; |
275 | struct tcrypt_result tresult; | 249 | struct crypto_wait wait; |
276 | void *hash_buff; | 250 | void *hash_buff; |
277 | char *xbuf[XBUFSIZE]; | 251 | char *xbuf[XBUFSIZE]; |
278 | int ret = -ENOMEM; | 252 | int ret = -ENOMEM; |
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
286 | if (testmgr_alloc_buf(xbuf)) | 260 | if (testmgr_alloc_buf(xbuf)) |
287 | goto out_nobuf; | 261 | goto out_nobuf; |
288 | 262 | ||
289 | init_completion(&tresult.completion); | 263 | crypto_init_wait(&wait); |
290 | 264 | ||
291 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 265 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
292 | if (!req) { | 266 | if (!req) { |
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
295 | goto out_noreq; | 269 | goto out_noreq; |
296 | } | 270 | } |
297 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 271 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
298 | tcrypt_complete, &tresult); | 272 | crypto_req_done, &wait); |
299 | 273 | ||
300 | j = 0; | 274 | j = 0; |
301 | for (i = 0; i < tcount; i++) { | 275 | for (i = 0; i < tcount; i++) { |
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
335 | 309 | ||
336 | ahash_request_set_crypt(req, sg, result, template[i].psize); | 310 | ahash_request_set_crypt(req, sg, result, template[i].psize); |
337 | if (use_digest) { | 311 | if (use_digest) { |
338 | ret = wait_async_op(&tresult, crypto_ahash_digest(req)); | 312 | ret = crypto_wait_req(crypto_ahash_digest(req), &wait); |
339 | if (ret) { | 313 | if (ret) { |
340 | pr_err("alg: hash: digest failed on test %d " | 314 | pr_err("alg: hash: digest failed on test %d " |
341 | "for %s: ret=%d\n", j, algo, -ret); | 315 | "for %s: ret=%d\n", j, algo, -ret); |
342 | goto out; | 316 | goto out; |
343 | } | 317 | } |
344 | } else { | 318 | } else { |
345 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); | 319 | ret = crypto_wait_req(crypto_ahash_init(req), &wait); |
346 | if (ret) { | 320 | if (ret) { |
347 | pr_err("alg: hash: init failed on test %d " | 321 | pr_err("alg: hash: init failed on test %d " |
348 | "for %s: ret=%d\n", j, algo, -ret); | 322 | "for %s: ret=%d\n", j, algo, -ret); |
349 | goto out; | 323 | goto out; |
350 | } | 324 | } |
351 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); | 325 | ret = crypto_wait_req(crypto_ahash_update(req), &wait); |
352 | if (ret) { | 326 | if (ret) { |
353 | pr_err("alg: hash: update failed on test %d " | 327 | pr_err("alg: hash: update failed on test %d " |
354 | "for %s: ret=%d\n", j, algo, -ret); | 328 | "for %s: ret=%d\n", j, algo, -ret); |
355 | goto out; | 329 | goto out; |
356 | } | 330 | } |
357 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); | 331 | ret = crypto_wait_req(crypto_ahash_final(req), &wait); |
358 | if (ret) { | 332 | if (ret) { |
359 | pr_err("alg: hash: final failed on test %d " | 333 | pr_err("alg: hash: final failed on test %d " |
360 | "for %s: ret=%d\n", j, algo, -ret); | 334 | "for %s: ret=%d\n", j, algo, -ret); |
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
420 | } | 394 | } |
421 | 395 | ||
422 | ahash_request_set_crypt(req, sg, result, template[i].psize); | 396 | ahash_request_set_crypt(req, sg, result, template[i].psize); |
423 | ret = crypto_ahash_digest(req); | 397 | ret = crypto_wait_req(crypto_ahash_digest(req), &wait); |
424 | switch (ret) { | 398 | if (ret) { |
425 | case 0: | 399 | pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", |
426 | break; | 400 | j, algo, -ret); |
427 | case -EINPROGRESS: | ||
428 | case -EBUSY: | ||
429 | wait_for_completion(&tresult.completion); | ||
430 | reinit_completion(&tresult.completion); | ||
431 | ret = tresult.err; | ||
432 | if (!ret) | ||
433 | break; | ||
434 | /* fall through */ | ||
435 | default: | ||
436 | printk(KERN_ERR "alg: hash: digest failed " | ||
437 | "on chunking test %d for %s: " | ||
438 | "ret=%d\n", j, algo, -ret); | ||
439 | goto out; | 401 | goto out; |
440 | } | 402 | } |
441 | 403 | ||
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
486 | } | 448 | } |
487 | 449 | ||
488 | ahash_request_set_crypt(req, sg, result, template[i].tap[0]); | 450 | ahash_request_set_crypt(req, sg, result, template[i].tap[0]); |
489 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); | 451 | ret = crypto_wait_req(crypto_ahash_init(req), &wait); |
490 | if (ret) { | 452 | if (ret) { |
491 | pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", | 453 | pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", |
492 | j, algo, -ret); | 454 | j, algo, -ret); |
493 | goto out; | 455 | goto out; |
494 | } | 456 | } |
495 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); | 457 | ret = crypto_wait_req(crypto_ahash_update(req), &wait); |
496 | if (ret) { | 458 | if (ret) { |
497 | pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", | 459 | pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", |
498 | j, algo, -ret); | 460 | j, algo, -ret); |
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
503 | for (k = 1; k < template[i].np; k++) { | 465 | for (k = 1; k < template[i].np; k++) { |
504 | ret = ahash_partial_update(&req, tfm, &template[i], | 466 | ret = ahash_partial_update(&req, tfm, &template[i], |
505 | hash_buff, k, temp, &sg[0], algo, result, | 467 | hash_buff, k, temp, &sg[0], algo, result, |
506 | &tresult); | 468 | &wait); |
507 | if (ret) { | 469 | if (ret) { |
508 | pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", | 470 | pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", |
509 | j, algo, -ret); | 471 | j, algo, -ret); |
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
511 | } | 473 | } |
512 | temp += template[i].tap[k]; | 474 | temp += template[i].tap[k]; |
513 | } | 475 | } |
514 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); | 476 | ret = crypto_wait_req(crypto_ahash_final(req), &wait); |
515 | if (ret) { | 477 | if (ret) { |
516 | pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", | 478 | pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", |
517 | j, algo, -ret); | 479 | j, algo, -ret); |
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
580 | struct scatterlist *sg; | 542 | struct scatterlist *sg; |
581 | struct scatterlist *sgout; | 543 | struct scatterlist *sgout; |
582 | const char *e, *d; | 544 | const char *e, *d; |
583 | struct tcrypt_result result; | 545 | struct crypto_wait wait; |
584 | unsigned int authsize, iv_len; | 546 | unsigned int authsize, iv_len; |
585 | void *input; | 547 | void *input; |
586 | void *output; | 548 | void *output; |
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
619 | else | 581 | else |
620 | e = "decryption"; | 582 | e = "decryption"; |
621 | 583 | ||
622 | init_completion(&result.completion); | 584 | crypto_init_wait(&wait); |
623 | 585 | ||
624 | req = aead_request_alloc(tfm, GFP_KERNEL); | 586 | req = aead_request_alloc(tfm, GFP_KERNEL); |
625 | if (!req) { | 587 | if (!req) { |
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
629 | } | 591 | } |
630 | 592 | ||
631 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 593 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
632 | tcrypt_complete, &result); | 594 | crypto_req_done, &wait); |
633 | 595 | ||
634 | iv_len = crypto_aead_ivsize(tfm); | 596 | iv_len = crypto_aead_ivsize(tfm); |
635 | 597 | ||
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
709 | 671 | ||
710 | aead_request_set_ad(req, template[i].alen); | 672 | aead_request_set_ad(req, template[i].alen); |
711 | 673 | ||
712 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | 674 | ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) |
675 | : crypto_aead_decrypt(req), &wait); | ||
713 | 676 | ||
714 | switch (ret) { | 677 | switch (ret) { |
715 | case 0: | 678 | case 0: |
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
722 | goto out; | 685 | goto out; |
723 | } | 686 | } |
724 | break; | 687 | break; |
725 | case -EINPROGRESS: | ||
726 | case -EBUSY: | ||
727 | wait_for_completion(&result.completion); | ||
728 | reinit_completion(&result.completion); | ||
729 | ret = result.err; | ||
730 | if (!ret) | ||
731 | break; | ||
732 | case -EBADMSG: | 688 | case -EBADMSG: |
733 | if (template[i].novrfy) | 689 | if (template[i].novrfy) |
734 | /* verification failure was expected */ | 690 | /* verification failure was expected */ |
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
866 | 822 | ||
867 | aead_request_set_ad(req, template[i].alen); | 823 | aead_request_set_ad(req, template[i].alen); |
868 | 824 | ||
869 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | 825 | ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) |
826 | : crypto_aead_decrypt(req), &wait); | ||
870 | 827 | ||
871 | switch (ret) { | 828 | switch (ret) { |
872 | case 0: | 829 | case 0: |
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
879 | goto out; | 836 | goto out; |
880 | } | 837 | } |
881 | break; | 838 | break; |
882 | case -EINPROGRESS: | ||
883 | case -EBUSY: | ||
884 | wait_for_completion(&result.completion); | ||
885 | reinit_completion(&result.completion); | ||
886 | ret = result.err; | ||
887 | if (!ret) | ||
888 | break; | ||
889 | case -EBADMSG: | 839 | case -EBADMSG: |
890 | if (template[i].novrfy) | 840 | if (template[i].novrfy) |
891 | /* verification failure was expected */ | 841 | /* verification failure was expected */ |
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1083 | struct scatterlist sg[8]; | 1033 | struct scatterlist sg[8]; |
1084 | struct scatterlist sgout[8]; | 1034 | struct scatterlist sgout[8]; |
1085 | const char *e, *d; | 1035 | const char *e, *d; |
1086 | struct tcrypt_result result; | 1036 | struct crypto_wait wait; |
1087 | void *data; | 1037 | void *data; |
1088 | char iv[MAX_IVLEN]; | 1038 | char iv[MAX_IVLEN]; |
1089 | char *xbuf[XBUFSIZE]; | 1039 | char *xbuf[XBUFSIZE]; |
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1107 | else | 1057 | else |
1108 | e = "decryption"; | 1058 | e = "decryption"; |
1109 | 1059 | ||
1110 | init_completion(&result.completion); | 1060 | crypto_init_wait(&wait); |
1111 | 1061 | ||
1112 | req = skcipher_request_alloc(tfm, GFP_KERNEL); | 1062 | req = skcipher_request_alloc(tfm, GFP_KERNEL); |
1113 | if (!req) { | 1063 | if (!req) { |
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1117 | } | 1067 | } |
1118 | 1068 | ||
1119 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 1069 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
1120 | tcrypt_complete, &result); | 1070 | crypto_req_done, &wait); |
1121 | 1071 | ||
1122 | j = 0; | 1072 | j = 0; |
1123 | for (i = 0; i < tcount; i++) { | 1073 | for (i = 0; i < tcount; i++) { |
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1164 | 1114 | ||
1165 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 1115 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
1166 | template[i].ilen, iv); | 1116 | template[i].ilen, iv); |
1167 | ret = enc ? crypto_skcipher_encrypt(req) : | 1117 | ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : |
1168 | crypto_skcipher_decrypt(req); | 1118 | crypto_skcipher_decrypt(req), &wait); |
1169 | 1119 | ||
1170 | switch (ret) { | 1120 | if (ret) { |
1171 | case 0: | ||
1172 | break; | ||
1173 | case -EINPROGRESS: | ||
1174 | case -EBUSY: | ||
1175 | wait_for_completion(&result.completion); | ||
1176 | reinit_completion(&result.completion); | ||
1177 | ret = result.err; | ||
1178 | if (!ret) | ||
1179 | break; | ||
1180 | /* fall through */ | ||
1181 | default: | ||
1182 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", | 1121 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", |
1183 | d, e, j, algo, -ret); | 1122 | d, e, j, algo, -ret); |
1184 | goto out; | 1123 | goto out; |
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1272 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 1211 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
1273 | template[i].ilen, iv); | 1212 | template[i].ilen, iv); |
1274 | 1213 | ||
1275 | ret = enc ? crypto_skcipher_encrypt(req) : | 1214 | ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : |
1276 | crypto_skcipher_decrypt(req); | 1215 | crypto_skcipher_decrypt(req), &wait); |
1277 | 1216 | ||
1278 | switch (ret) { | 1217 | if (ret) { |
1279 | case 0: | ||
1280 | break; | ||
1281 | case -EINPROGRESS: | ||
1282 | case -EBUSY: | ||
1283 | wait_for_completion(&result.completion); | ||
1284 | reinit_completion(&result.completion); | ||
1285 | ret = result.err; | ||
1286 | if (!ret) | ||
1287 | break; | ||
1288 | /* fall through */ | ||
1289 | default: | ||
1290 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", | 1218 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", |
1291 | d, e, j, algo, -ret); | 1219 | d, e, j, algo, -ret); |
1292 | goto out; | 1220 | goto out; |
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1462 | int ret; | 1390 | int ret; |
1463 | struct scatterlist src, dst; | 1391 | struct scatterlist src, dst; |
1464 | struct acomp_req *req; | 1392 | struct acomp_req *req; |
1465 | struct tcrypt_result result; | 1393 | struct crypto_wait wait; |
1466 | 1394 | ||
1467 | output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); | 1395 | output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); |
1468 | if (!output) | 1396 | if (!output) |
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1486 | } | 1414 | } |
1487 | 1415 | ||
1488 | memset(output, 0, dlen); | 1416 | memset(output, 0, dlen); |
1489 | init_completion(&result.completion); | 1417 | crypto_init_wait(&wait); |
1490 | sg_init_one(&src, input_vec, ilen); | 1418 | sg_init_one(&src, input_vec, ilen); |
1491 | sg_init_one(&dst, output, dlen); | 1419 | sg_init_one(&dst, output, dlen); |
1492 | 1420 | ||
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1501 | 1429 | ||
1502 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | 1430 | acomp_request_set_params(req, &src, &dst, ilen, dlen); |
1503 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 1431 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
1504 | tcrypt_complete, &result); | 1432 | crypto_req_done, &wait); |
1505 | 1433 | ||
1506 | ret = wait_async_op(&result, crypto_acomp_compress(req)); | 1434 | ret = crypto_wait_req(crypto_acomp_compress(req), &wait); |
1507 | if (ret) { | 1435 | if (ret) { |
1508 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", | 1436 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", |
1509 | i + 1, algo, -ret); | 1437 | i + 1, algo, -ret); |
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1516 | dlen = COMP_BUF_SIZE; | 1444 | dlen = COMP_BUF_SIZE; |
1517 | sg_init_one(&src, output, ilen); | 1445 | sg_init_one(&src, output, ilen); |
1518 | sg_init_one(&dst, decomp_out, dlen); | 1446 | sg_init_one(&dst, decomp_out, dlen); |
1519 | init_completion(&result.completion); | 1447 | crypto_init_wait(&wait); |
1520 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | 1448 | acomp_request_set_params(req, &src, &dst, ilen, dlen); |
1521 | 1449 | ||
1522 | ret = wait_async_op(&result, crypto_acomp_decompress(req)); | 1450 | ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); |
1523 | if (ret) { | 1451 | if (ret) { |
1524 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", | 1452 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", |
1525 | i + 1, algo, -ret); | 1453 | i + 1, algo, -ret); |
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1563 | } | 1491 | } |
1564 | 1492 | ||
1565 | memset(output, 0, dlen); | 1493 | memset(output, 0, dlen); |
1566 | init_completion(&result.completion); | 1494 | crypto_init_wait(&wait); |
1567 | sg_init_one(&src, input_vec, ilen); | 1495 | sg_init_one(&src, input_vec, ilen); |
1568 | sg_init_one(&dst, output, dlen); | 1496 | sg_init_one(&dst, output, dlen); |
1569 | 1497 | ||
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm, | |||
1578 | 1506 | ||
1579 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | 1507 | acomp_request_set_params(req, &src, &dst, ilen, dlen); |
1580 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 1508 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
1581 | tcrypt_complete, &result); | 1509 | crypto_req_done, &wait); |
1582 | 1510 | ||
1583 | ret = wait_async_op(&result, crypto_acomp_decompress(req)); | 1511 | ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); |
1584 | if (ret) { | 1512 | if (ret) { |
1585 | pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", | 1513 | pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", |
1586 | i + 1, algo, -ret); | 1514 | i + 1, algo, -ret); |
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2000 | void *a_public = NULL; | 1928 | void *a_public = NULL; |
2001 | void *a_ss = NULL; | 1929 | void *a_ss = NULL; |
2002 | void *shared_secret = NULL; | 1930 | void *shared_secret = NULL; |
2003 | struct tcrypt_result result; | 1931 | struct crypto_wait wait; |
2004 | unsigned int out_len_max; | 1932 | unsigned int out_len_max; |
2005 | int err = -ENOMEM; | 1933 | int err = -ENOMEM; |
2006 | struct scatterlist src, dst; | 1934 | struct scatterlist src, dst; |
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2009 | if (!req) | 1937 | if (!req) |
2010 | return err; | 1938 | return err; |
2011 | 1939 | ||
2012 | init_completion(&result.completion); | 1940 | crypto_init_wait(&wait); |
2013 | 1941 | ||
2014 | err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size); | 1942 | err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size); |
2015 | if (err < 0) | 1943 | if (err < 0) |
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2027 | sg_init_one(&dst, output_buf, out_len_max); | 1955 | sg_init_one(&dst, output_buf, out_len_max); |
2028 | kpp_request_set_output(req, &dst, out_len_max); | 1956 | kpp_request_set_output(req, &dst, out_len_max); |
2029 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 1957 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2030 | tcrypt_complete, &result); | 1958 | crypto_req_done, &wait); |
2031 | 1959 | ||
2032 | /* Compute party A's public key */ | 1960 | /* Compute party A's public key */ |
2033 | err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); | 1961 | err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); |
2034 | if (err) { | 1962 | if (err) { |
2035 | pr_err("alg: %s: Party A: generate public key test failed. err %d\n", | 1963 | pr_err("alg: %s: Party A: generate public key test failed. err %d\n", |
2036 | alg, err); | 1964 | alg, err); |
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2069 | kpp_request_set_input(req, &src, vec->b_public_size); | 1997 | kpp_request_set_input(req, &src, vec->b_public_size); |
2070 | kpp_request_set_output(req, &dst, out_len_max); | 1998 | kpp_request_set_output(req, &dst, out_len_max); |
2071 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 1999 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2072 | tcrypt_complete, &result); | 2000 | crypto_req_done, &wait); |
2073 | err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); | 2001 | err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); |
2074 | if (err) { | 2002 | if (err) { |
2075 | pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", | 2003 | pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", |
2076 | alg, err); | 2004 | alg, err); |
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2100 | kpp_request_set_input(req, &src, vec->expected_a_public_size); | 2028 | kpp_request_set_input(req, &src, vec->expected_a_public_size); |
2101 | kpp_request_set_output(req, &dst, out_len_max); | 2029 | kpp_request_set_output(req, &dst, out_len_max); |
2102 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2030 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2103 | tcrypt_complete, &result); | 2031 | crypto_req_done, &wait); |
2104 | err = wait_async_op(&result, | 2032 | err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), |
2105 | crypto_kpp_compute_shared_secret(req)); | 2033 | &wait); |
2106 | if (err) { | 2034 | if (err) { |
2107 | pr_err("alg: %s: Party B: compute shared secret failed. err %d\n", | 2035 | pr_err("alg: %s: Party B: compute shared secret failed. err %d\n", |
2108 | alg, err); | 2036 | alg, err); |
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2179 | struct akcipher_request *req; | 2107 | struct akcipher_request *req; |
2180 | void *outbuf_enc = NULL; | 2108 | void *outbuf_enc = NULL; |
2181 | void *outbuf_dec = NULL; | 2109 | void *outbuf_dec = NULL; |
2182 | struct tcrypt_result result; | 2110 | struct crypto_wait wait; |
2183 | unsigned int out_len_max, out_len = 0; | 2111 | unsigned int out_len_max, out_len = 0; |
2184 | int err = -ENOMEM; | 2112 | int err = -ENOMEM; |
2185 | struct scatterlist src, dst, src_tab[2]; | 2113 | struct scatterlist src, dst, src_tab[2]; |
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2191 | if (!req) | 2119 | if (!req) |
2192 | goto free_xbuf; | 2120 | goto free_xbuf; |
2193 | 2121 | ||
2194 | init_completion(&result.completion); | 2122 | crypto_init_wait(&wait); |
2195 | 2123 | ||
2196 | if (vecs->public_key_vec) | 2124 | if (vecs->public_key_vec) |
2197 | err = crypto_akcipher_set_pub_key(tfm, vecs->key, | 2125 | err = crypto_akcipher_set_pub_key(tfm, vecs->key, |
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2220 | akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, | 2148 | akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, |
2221 | out_len_max); | 2149 | out_len_max); |
2222 | akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2150 | akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2223 | tcrypt_complete, &result); | 2151 | crypto_req_done, &wait); |
2224 | 2152 | ||
2225 | err = wait_async_op(&result, vecs->siggen_sigver_test ? | 2153 | err = crypto_wait_req(vecs->siggen_sigver_test ? |
2226 | /* Run asymmetric signature generation */ | 2154 | /* Run asymmetric signature generation */ |
2227 | crypto_akcipher_sign(req) : | 2155 | crypto_akcipher_sign(req) : |
2228 | /* Run asymmetric encrypt */ | 2156 | /* Run asymmetric encrypt */ |
2229 | crypto_akcipher_encrypt(req)); | 2157 | crypto_akcipher_encrypt(req), &wait); |
2230 | if (err) { | 2158 | if (err) { |
2231 | pr_err("alg: akcipher: encrypt test failed. err %d\n", err); | 2159 | pr_err("alg: akcipher: encrypt test failed. err %d\n", err); |
2232 | goto free_all; | 2160 | goto free_all; |
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2261 | 2189 | ||
2262 | sg_init_one(&src, xbuf[0], vecs->c_size); | 2190 | sg_init_one(&src, xbuf[0], vecs->c_size); |
2263 | sg_init_one(&dst, outbuf_dec, out_len_max); | 2191 | sg_init_one(&dst, outbuf_dec, out_len_max); |
2264 | init_completion(&result.completion); | 2192 | crypto_init_wait(&wait); |
2265 | akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); | 2193 | akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); |
2266 | 2194 | ||
2267 | err = wait_async_op(&result, vecs->siggen_sigver_test ? | 2195 | err = crypto_wait_req(vecs->siggen_sigver_test ? |
2268 | /* Run asymmetric signature verification */ | 2196 | /* Run asymmetric signature verification */ |
2269 | crypto_akcipher_verify(req) : | 2197 | crypto_akcipher_verify(req) : |
2270 | /* Run asymmetric decrypt */ | 2198 | /* Run asymmetric decrypt */ |
2271 | crypto_akcipher_decrypt(req)); | 2199 | crypto_akcipher_decrypt(req), &wait); |
2272 | if (err) { | 2200 | if (err) { |
2273 | pr_err("alg: akcipher: decrypt test failed. err %d\n", err); | 2201 | pr_err("alg: akcipher: decrypt test failed. err %d\n", err); |
2274 | goto free_all; | 2202 | goto free_all; |
@@ -3500,6 +3428,12 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3500 | .hash = __VECS(sha512_tv_template) | 3428 | .hash = __VECS(sha512_tv_template) |
3501 | } | 3429 | } |
3502 | }, { | 3430 | }, { |
3431 | .alg = "sm3", | ||
3432 | .test = alg_test_hash, | ||
3433 | .suite = { | ||
3434 | .hash = __VECS(sm3_tv_template) | ||
3435 | } | ||
3436 | }, { | ||
3503 | .alg = "tgr128", | 3437 | .alg = "tgr128", |
3504 | .test = alg_test_hash, | 3438 | .test = alg_test_hash, |
3505 | .suite = { | 3439 | .suite = { |
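The bulk of the testmgr.c churn above is mechanical: the file-local struct tcrypt_result, tcrypt_complete() callback and wait_async_op() helper are replaced by the generic crypto_wait API that this series adds to the crypto core. A minimal sketch of the new idiom for one asynchronous hash request (hypothetical demo_digest() wrapper; assumes tfm, sg and result are already set up by the caller):

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <crypto/hash.h>

    static int demo_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
                           u8 *result, unsigned int len)
    {
            struct ahash_request *req;
            struct crypto_wait wait;
            int ret;

            crypto_init_wait(&wait);

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            /* crypto_req_done() records the status and completes the wait */
            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       crypto_req_done, &wait);
            ahash_request_set_crypt(req, sg, result, len);

            /* folds the old 0 / -EINPROGRESS / -EBUSY switch into one call */
            ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

            ahash_request_free(req);
            return ret;
    }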
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d54971d2d1c8..a714b6293959 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = { | |||
1497 | } | 1497 | } |
1498 | }; | 1498 | }; |
1499 | 1499 | ||
1500 | /* Example vectors below taken from | ||
1501 | * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf | ||
1502 | * | ||
1503 | * The rest taken from | ||
1504 | * https://github.com/adamws/oscca-sm3 | ||
1505 | */ | ||
1506 | static const struct hash_testvec sm3_tv_template[] = { | ||
1507 | { | ||
1508 | .plaintext = "", | ||
1509 | .psize = 0, | ||
1510 | .digest = (u8 *)(u8 []) { | ||
1511 | 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, | ||
1512 | 0x8E, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, |||
1513 | 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, | ||
1514 | 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B } | ||
1515 | }, { | ||
1516 | .plaintext = "a", | ||
1517 | .psize = 1, | ||
1518 | .digest = (u8 *)(u8 []) { | ||
1519 | 0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29, | ||
1520 | 0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C, | ||
1521 | 0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82, | ||
1522 | 0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 } | ||
1523 | }, { | ||
1524 | /* A.1. Example 1 */ | ||
1525 | .plaintext = "abc", | ||
1526 | .psize = 3, | ||
1527 | .digest = (u8 *)(u8 []) { | ||
1528 | 0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9, | ||
1529 | 0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2, | ||
1530 | 0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2, | ||
1531 | 0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 } | ||
1532 | }, { | ||
1533 | .plaintext = "abcdefghijklmnopqrstuvwxyz", | ||
1534 | .psize = 26, | ||
1535 | .digest = (u8 *)(u8 []) { | ||
1536 | 0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC, | ||
1537 | 0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4, | ||
1538 | 0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63, | ||
1539 | 0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 } | ||
1540 | }, { | ||
1541 | /* A.1. Example 2 */ | ||
1542 | .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab" | ||
1543 | "cdabcdabcdabcdabcd", | ||
1544 | .psize = 64, | ||
1545 | .digest = (u8 *)(u8 []) { | ||
1546 | 0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1, | ||
1547 | 0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D, | ||
1548 | 0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65, | ||
1549 | 0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 } | ||
1550 | }, { | ||
1551 | .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1552 | "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1553 | "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1554 | "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1555 | "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1556 | "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" | ||
1557 | "abcdabcdabcdabcdabcdabcdabcdabcd", | ||
1558 | .psize = 256, | ||
1559 | .digest = (u8 *)(u8 []) { | ||
1560 | 0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91, | ||
1561 | 0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF, | ||
1562 | 0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9, | ||
1563 | 0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 } | ||
1564 | } | ||
1565 | }; | ||
1566 | |||
1500 | /* | 1567 | /* |
1501 | * SHA1 test vectors from FIPS PUB 180-1 | 1568 | * SHA1 test vectors from FIPS PUB 180-1 |
1502 | * Long vector from CAVS 5.0 | 1569 | * Long vector from CAVS 5.0 |
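With the sm3_tv_template vectors above wired into alg_test_descs[] (the new "sm3" entry in testmgr.c), any registered SM3 provider is verified against them at boot or module load. A hedged sketch of exercising the hash from kernel code against the A.1. "abc" vector (hypothetical sm3_abc_selftest(); assumes an SM3 implementation such as CONFIG_CRYPTO_SM3 is available):

    #include <linux/err.h>
    #include <crypto/hash.h>

    static int sm3_abc_selftest(void)
    {
            struct crypto_shash *tfm;
            u8 digest[32];
            int ret;

            tfm = crypto_alloc_shash("sm3", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;
                    ret = crypto_shash_digest(desc, "abc", 3, digest);
                    /* digest should now start 0x66 0xC7 0xF0 0xF4 ... */
            }

            crypto_free_shash(tfm);
            return ret;
    }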
diff --git a/crypto/xts.c b/crypto/xts.c index e31828ed0046..f317c48b5e43 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err) | |||
269 | crypto_skcipher_encrypt(subreq) ?: | 269 | crypto_skcipher_encrypt(subreq) ?: |
270 | post_crypt(req); | 270 | post_crypt(req); |
271 | 271 | ||
272 | if (err == -EINPROGRESS || | 272 | if (err == -EINPROGRESS || err == -EBUSY) |
273 | (err == -EBUSY && | ||
274 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
275 | return err; | 273 | return err; |
276 | } | 274 | } |
277 | 275 | ||
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err) | |||
321 | crypto_skcipher_decrypt(subreq) ?: | 319 | crypto_skcipher_decrypt(subreq) ?: |
322 | post_crypt(req); | 320 | post_crypt(req); |
323 | 321 | ||
324 | if (err == -EINPROGRESS || | 322 | if (err == -EINPROGRESS || err == -EBUSY) |
325 | (err == -EBUSY && | ||
326 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
327 | return err; | 323 | return err; |
328 | } | 324 | } |
329 | 325 | ||
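The xts.c simplification above leans on the queueing-return-code disambiguation from this series: -EBUSY now always means the request was backlogged and a completion callback will follow, while a full queue without CRYPTO_TFM_REQ_MAY_BACKLOG yields the new -ENOSPC, so the old flag test is redundant. A hedged one-liner capturing the resulting contract (illustrative helper, not from the patch):

    #include <linux/errno.h>
    #include <linux/types.h>

    /*
     * Only these two codes promise a later completion callback;
     * 0, -ENOSPC (queue full, MAY_BACKLOG unset) and real errors
     * are final and must be handled inline by the submitter.
     */
    static bool demo_will_complete_async(int err)
    {
            return err == -EINPROGRESS || err == -EBUSY;
    }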
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 95a031e9eced..f6e3e5abc117 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835 | |||
100 | If unsure, say Y. | 100 | If unsure, say Y. |
101 | 101 | ||
102 | config HW_RANDOM_IPROC_RNG200 | 102 | config HW_RANDOM_IPROC_RNG200 |
103 | tristate "Broadcom iProc RNG200 support" | 103 | tristate "Broadcom iProc/STB RNG200 support" |
104 | depends on ARCH_BCM_IPROC | 104 | depends on ARCH_BCM_IPROC || ARCH_BRCMSTB |
105 | default HW_RANDOM | 105 | default HW_RANDOM |
106 | ---help--- | 106 | ---help--- |
107 | This driver provides kernel-side support for the RNG200 | 107 | This driver provides kernel-side support for the RNG200 |
108 | hardware found on the Broadcom iProc SoCs. | 108 | hardware found on the Broadcom iProc and STB SoCs. |
109 | 109 | ||
110 | To compile this driver as a module, choose M here: the | 110 | To compile this driver as a module, choose M here: the |
111 | module will be called iproc-rng200 | 111 | module will be called iproc-rng200 |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 9701ac7d8b47..657b8770b6b9 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = { | |||
292 | .groups = rng_dev_groups, | 292 | .groups = rng_dev_groups, |
293 | }; | 293 | }; |
294 | 294 | ||
295 | static int enable_best_rng(void) | ||
296 | { | ||
297 | int ret = -ENODEV; | ||
298 | |||
299 | BUG_ON(!mutex_is_locked(&rng_mutex)); | ||
300 | |||
301 | /* rng_list is sorted by quality, use the best (=first) one */ | ||
302 | if (!list_empty(&rng_list)) { | ||
303 | struct hwrng *new_rng; | ||
304 | |||
305 | new_rng = list_entry(rng_list.next, struct hwrng, list); | ||
306 | ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); | ||
307 | if (!ret) | ||
308 | cur_rng_set_by_user = 0; | ||
309 | } | ||
310 | |||
311 | return ret; | ||
312 | } | ||
313 | |||
295 | static ssize_t hwrng_attr_current_store(struct device *dev, | 314 | static ssize_t hwrng_attr_current_store(struct device *dev, |
296 | struct device_attribute *attr, | 315 | struct device_attribute *attr, |
297 | const char *buf, size_t len) | 316 | const char *buf, size_t len) |
298 | { | 317 | { |
299 | int err; | 318 | int err; |
300 | struct hwrng *rng; | 319 | struct hwrng *rng; |
301 | 320 | ||
302 | err = mutex_lock_interruptible(&rng_mutex); | 321 | err = mutex_lock_interruptible(&rng_mutex); |
303 | if (err) | 322 | if (err) |
304 | return -ERESTARTSYS; | 323 | return -ERESTARTSYS; |
305 | err = -ENODEV; | 324 | err = -ENODEV; |
306 | list_for_each_entry(rng, &rng_list, list) { | 325 | if (sysfs_streq(buf, "")) { |
307 | if (sysfs_streq(rng->name, buf)) { | 326 | err = enable_best_rng(); |
308 | err = 0; | 327 | } else { |
309 | cur_rng_set_by_user = 1; | 328 | list_for_each_entry(rng, &rng_list, list) { |
310 | if (rng != current_rng) | 329 | if (sysfs_streq(rng->name, buf)) { |
330 | cur_rng_set_by_user = 1; | ||
311 | err = set_current_rng(rng); | 331 | err = set_current_rng(rng); |
312 | break; | 332 | break; |
333 | } | ||
313 | } | 334 | } |
314 | } | 335 | } |
336 | |||
315 | mutex_unlock(&rng_mutex); | 337 | mutex_unlock(&rng_mutex); |
316 | 338 | ||
317 | return err ? : len; | 339 | return err ? : len; |
@@ -423,7 +445,7 @@ static void start_khwrngd(void) | |||
423 | { | 445 | { |
424 | hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); | 446 | hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); |
425 | if (IS_ERR(hwrng_fill)) { | 447 | if (IS_ERR(hwrng_fill)) { |
426 | pr_err("hwrng_fill thread creation failed"); | 448 | pr_err("hwrng_fill thread creation failed\n"); |
427 | hwrng_fill = NULL; | 449 | hwrng_fill = NULL; |
428 | } | 450 | } |
429 | } | 451 | } |
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng) | |||
493 | mutex_lock(&rng_mutex); | 515 | mutex_lock(&rng_mutex); |
494 | 516 | ||
495 | list_del(&rng->list); | 517 | list_del(&rng->list); |
496 | if (current_rng == rng) { | 518 | if (current_rng == rng) |
497 | drop_current_rng(); | 519 | enable_best_rng(); |
498 | cur_rng_set_by_user = 0; | ||
499 | /* rng_list is sorted by quality, use the best (=first) one */ | ||
500 | if (!list_empty(&rng_list)) { | ||
501 | struct hwrng *new_rng; | ||
502 | |||
503 | new_rng = list_entry(rng_list.next, struct hwrng, list); | ||
504 | set_current_rng(new_rng); | ||
505 | } | ||
506 | } | ||
507 | 520 | ||
508 | if (list_empty(&rng_list)) { | 521 | if (list_empty(&rng_list)) { |
509 | mutex_unlock(&rng_mutex); | 522 | mutex_unlock(&rng_mutex); |
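Together with enable_best_rng() above, the store handler now lets user space drop a manual override: writing an empty string to rng_current reverts to automatic, quality-ordered selection. A hedged user-space sketch (sysfs_streq() in the handler treats a lone newline as the empty string):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/class/misc/hw_random/rng_current";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* equivalent of: echo "" > rng_current */
            if (write(fd, "\n", 1) < 0)
                    perror("write");
            close(fd);
            return 0;
    }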
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c index 3eaf7cb96d36..8b5a20b35293 100644 --- a/drivers/char/hw_random/iproc-rng200.c +++ b/drivers/char/hw_random/iproc-rng200.c | |||
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev) | |||
220 | } | 220 | } |
221 | 221 | ||
222 | static const struct of_device_id iproc_rng200_of_match[] = { | 222 | static const struct of_device_id iproc_rng200_of_match[] = { |
223 | { .compatible = "brcm,bcm7278-rng200", }, | ||
223 | { .compatible = "brcm,iproc-rng200", }, | 224 | { .compatible = "brcm,iproc-rng200", }, |
224 | {}, | 225 | {}, |
225 | }; | 226 | }; |
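The BCM7278 entry sits alongside the generic iProc compatible, so existing boards keep matching. Purely for illustration (not from the patch), a board's device tree would usually list the specific string first, and a driver-side table like this then binds on either:

    #include <linux/module.h>
    #include <linux/mod_devicetable.h>

    /*
     * Illustrative mirror of the table above; a BCM7278 node would
     * typically carry:
     *   compatible = "brcm,bcm7278-rng200", "brcm,iproc-rng200";
     */
    static const struct of_device_id demo_rng200_of_match[] = {
            { .compatible = "brcm,bcm7278-rng200", },
            { .compatible = "brcm,iproc-rng200", },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, demo_rng200_of_match);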
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index d9f46b437cc2..4e2a3f635277 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c | |||
@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev) | |||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | static struct vio_device_id pseries_rng_driver_ids[] = { | 75 | static const struct vio_device_id pseries_rng_driver_ids[] = { |
76 | { "ibm,random-v1", "ibm,random"}, | 76 | { "ibm,random-v1", "ibm,random"}, |
77 | { "", "" } | 77 | { "", "" } |
78 | }; | 78 | }; |
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 03ff5483d865..f615684028af 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c | |||
@@ -53,13 +53,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data, | |||
53 | int period_us = ktime_to_us(priv->period); | 53 | int period_us = ktime_to_us(priv->period); |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * The RNG provides 32-bits per read. Ensure there is enough space for | ||
57 | * at minimum one read. | ||
58 | */ | ||
59 | if (max < sizeof(u32)) | ||
60 | return 0; | ||
61 | |||
62 | /* | ||
63 | * There may not have been enough time for new data to be generated | 56 | * There may not have been enough time for new data to be generated |
64 | * since the last request. If the caller doesn't want to wait, let them | 57 | * since the last request. If the caller doesn't want to wait, let them |
65 | * bail out. Otherwise, wait for the completion. If the new data has | 58 | * bail out. Otherwise, wait for the completion. If the new data has |
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 3fa2f8a009b3..b89df66ea1ae 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c | |||
@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev) | |||
184 | 184 | ||
185 | static int virtrng_restore(struct virtio_device *vdev) | 185 | static int virtrng_restore(struct virtio_device *vdev) |
186 | { | 186 | { |
187 | return probe_common(vdev); | 187 | int err; |
188 | |||
189 | err = probe_common(vdev); | ||
190 | if (!err) { | ||
191 | struct virtrng_info *vi = vdev->priv; | ||
192 | |||
193 | /* | ||
194 | * Set hwrng_removed to ensure that virtio_read() | ||
195 | * does not block waiting for data before the | ||
196 | * registration is complete. | ||
197 | */ | ||
198 | vi->hwrng_removed = true; | ||
199 | err = hwrng_register(&vi->hwrng); | ||
200 | if (!err) { | ||
201 | vi->hwrng_register_done = true; | ||
202 | vi->hwrng_removed = false; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | return err; | ||
188 | } | 207 | } |
189 | #endif | 208 | #endif |
190 | 209 | ||
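The restore path above has to re-register the hwrng because the freeze path unregisters it; hwrng_removed is raised before hwrng_register() so that a read racing with the resume fails fast instead of blocking for data that cannot arrive yet. A stripped-down sketch of the sequencing (hypothetical demo_rng state struct mirroring virtrng_info):

    #include <linux/hw_random.h>

    struct demo_rng {
            struct hwrng hwrng;
            bool removed;           /* gates the read callback */
            bool register_done;
    };

    static int demo_restore(struct demo_rng *vi)
    {
            int err;

            /* fail reads fast until registration has finished */
            vi->removed = true;
            err = hwrng_register(&vi->hwrng);
            if (!err) {
                    vi->register_done = true;
                    vi->removed = false;
            }
            return err;
    }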
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index fe33c199fc1a..47ec920d5b71 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390 | |||
199 | 199 | ||
200 | It is available with IBM z13 or later. | 200 | It is available with IBM z13 or later. |
201 | 201 | ||
202 | config CRYPTO_DEV_MV_CESA | ||
203 | tristate "Marvell's Cryptographic Engine" | ||
204 | depends on PLAT_ORION | ||
205 | select CRYPTO_AES | ||
206 | select CRYPTO_BLKCIPHER | ||
207 | select CRYPTO_HASH | ||
208 | select SRAM | ||
209 | help | ||
210 | This driver allows you to utilize the Cryptographic Engines and | ||
211 | Security Accelerator (CESA) which can be found on the Marvell Orion | ||
212 | and Kirkwood SoCs, such as QNAP's TS-209. | ||
213 | |||
214 | Currently the driver supports AES in ECB and CBC mode without DMA. | ||
215 | |||
216 | config CRYPTO_DEV_MARVELL_CESA | 202 | config CRYPTO_DEV_MARVELL_CESA |
217 | tristate "New Marvell's Cryptographic Engine driver" | 203 | tristate "Marvell's Cryptographic Engine driver" |
218 | depends on PLAT_ORION || ARCH_MVEBU | 204 | depends on PLAT_ORION || ARCH_MVEBU |
219 | select CRYPTO_AES | 205 | select CRYPTO_AES |
220 | select CRYPTO_DES | 206 | select CRYPTO_DES |
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA | |||
223 | select SRAM | 209 | select SRAM |
224 | help | 210 | help |
225 | This driver allows you to utilize the Cryptographic Engines and | 211 | This driver allows you to utilize the Cryptographic Engines and |
226 | Security Accelerator (CESA) which can be found on the Armada 370. | 212 | Security Accelerator (CESA) which can be found on MVEBU and ORION |
213 | platforms. | ||
227 | This driver supports CPU offload through DMA transfers. | 214 | This driver supports CPU offload through DMA transfers. |
228 | 215 | ||
229 | This driver is aimed at replacing the mv_cesa driver. This will only | ||
230 | happen once it has received proper testing. | ||
231 | |||
232 | config CRYPTO_DEV_NIAGARA2 | 216 | config CRYPTO_DEV_NIAGARA2 |
233 | tristate "Niagara2 Stream Processing Unit driver" | 217 | tristate "Niagara2 Stream Processing Unit driver" |
234 | select CRYPTO_DES | 218 | select CRYPTO_DES |
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX | |||
315 | tristate "Driver AMCC PPC4xx crypto accelerator" | 299 | tristate "Driver AMCC PPC4xx crypto accelerator" |
316 | depends on PPC && 4xx | 300 | depends on PPC && 4xx |
317 | select CRYPTO_HASH | 301 | select CRYPTO_HASH |
302 | select CRYPTO_AEAD | ||
303 | select CRYPTO_AES | ||
304 | select CRYPTO_CCM | ||
305 | select CRYPTO_GCM | ||
318 | select CRYPTO_BLKCIPHER | 306 | select CRYPTO_BLKCIPHER |
319 | help | 307 | help |
320 | This option allows you to have support for AMCC crypto acceleration. | 308 | This option allows you to have support for AMCC crypto acceleration. |
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P | |||
439 | Select this to offload AES algorithm execution from the Samsung | 427 | Select this to offload AES algorithm execution from the Samsung |
440 | S5PV210, S5PC110 or Exynos SoCs. | 428 | S5PV210, S5PC110 or Exynos SoCs. |
441 | 429 | ||
430 | config CRYPTO_DEV_EXYNOS_HASH | ||
431 | bool "Support for Samsung Exynos HASH accelerator" | ||
432 | depends on CRYPTO_DEV_S5P | ||
433 | depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m | ||
434 | select CRYPTO_SHA1 | ||
435 | select CRYPTO_MD5 | ||
436 | select CRYPTO_SHA256 | ||
437 | help | ||
438 | Select this to offload MD5/SHA1/SHA256 hash computation from the | ||
439 | Exynos CPU to the HASH hardware. Software SHA1, MD5 and SHA256 | ||
440 | are still selected, as they are needed for small and zero-size | ||
441 | messages. The HASH algorithms are disabled when EXYNOS_RNG is | ||
442 | enabled, because the two drivers share the same hardware block. | ||
443 | |||
442 | config CRYPTO_DEV_NX | 444 | config CRYPTO_DEV_NX |
443 | bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration" | 445 | bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration" |
444 | depends on PPC64 | 446 | depends on PPC64 |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c00708d04be6..2513d13ea2c4 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o | |||
15 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o | 15 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
16 | obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o | 16 | obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o |
17 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o | 17 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
18 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o | ||
19 | obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ | 18 | obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ |
20 | obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ | 19 | obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ |
21 | obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o | 20 | obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o |
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile index b95539928fdf..e33c185fc163 100644 --- a/drivers/crypto/amcc/Makefile +++ b/drivers/crypto/amcc/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o | 1 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o |
2 | crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o | 2 | crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o |
3 | crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o | 3 | crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 4afca3968773..eeaf27859d80 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -26,11 +26,14 @@ | |||
26 | #include <crypto/internal/hash.h> | 26 | #include <crypto/internal/hash.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <crypto/algapi.h> | 28 | #include <crypto/algapi.h> |
29 | #include <crypto/aead.h> | ||
29 | #include <crypto/aes.h> | 30 | #include <crypto/aes.h> |
31 | #include <crypto/gcm.h> | ||
30 | #include <crypto/sha.h> | 32 | #include <crypto/sha.h> |
33 | #include <crypto/ctr.h> | ||
31 | #include "crypto4xx_reg_def.h" | 34 | #include "crypto4xx_reg_def.h" |
32 | #include "crypto4xx_sa.h" | ||
33 | #include "crypto4xx_core.h" | 35 | #include "crypto4xx_core.h" |
36 | #include "crypto4xx_sa.h" | ||
34 | 37 | ||
35 | static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, | 38 | static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, |
36 | u32 save_iv, u32 ld_h, u32 ld_iv, | 39 | u32 save_iv, u32 ld_h, u32 ld_iv, |
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, | |||
62 | sa->sa_command_1.bf.crypto_mode9_8 = cm & 3; | 65 | sa->sa_command_1.bf.crypto_mode9_8 = cm & 3; |
63 | sa->sa_command_1.bf.feedback_mode = cfb; | 66 | sa->sa_command_1.bf.feedback_mode = cfb; |
64 | sa->sa_command_1.bf.sa_rev = 1; | 67 | sa->sa_command_1.bf.sa_rev = 1; |
68 | sa->sa_command_1.bf.hmac_muting = hmac_mc; | ||
65 | sa->sa_command_1.bf.extended_seq_num = esn; | 69 | sa->sa_command_1.bf.extended_seq_num = esn; |
66 | sa->sa_command_1.bf.seq_num_mask = sn_mask; | 70 | sa->sa_command_1.bf.seq_num_mask = sn_mask; |
67 | sa->sa_command_1.bf.mutable_bit_proc = mute; | 71 | sa->sa_command_1.bf.mutable_bit_proc = mute; |
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, | |||
73 | int crypto4xx_encrypt(struct ablkcipher_request *req) | 77 | int crypto4xx_encrypt(struct ablkcipher_request *req) |
74 | { | 78 | { |
75 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 79 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
80 | unsigned int ivlen = crypto_ablkcipher_ivsize( | ||
81 | crypto_ablkcipher_reqtfm(req)); | ||
82 | __le32 iv[ivlen]; | ||
76 | 83 | ||
77 | ctx->direction = DIR_OUTBOUND; | 84 | if (ivlen) |
78 | ctx->hash_final = 0; | 85 | crypto4xx_memcpy_to_le32(iv, req->info, ivlen); |
79 | ctx->is_hash = 0; | ||
80 | ctx->pd_ctl = 0x1; | ||
81 | 86 | ||
82 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | 87 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, |
83 | req->nbytes, req->info, | 88 | req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0); |
84 | get_dynamic_sa_iv_size(ctx)); | ||
85 | } | 89 | } |
86 | 90 | ||
87 | int crypto4xx_decrypt(struct ablkcipher_request *req) | 91 | int crypto4xx_decrypt(struct ablkcipher_request *req) |
88 | { | 92 | { |
89 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 93 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
94 | unsigned int ivlen = crypto_ablkcipher_ivsize( | ||
95 | crypto_ablkcipher_reqtfm(req)); | ||
96 | __le32 iv[ivlen]; | ||
90 | 97 | ||
91 | ctx->direction = DIR_INBOUND; | 98 | if (ivlen) |
92 | ctx->hash_final = 0; | 99 | crypto4xx_memcpy_to_le32(iv, req->info, ivlen); |
93 | ctx->is_hash = 0; | ||
94 | ctx->pd_ctl = 1; | ||
95 | 100 | ||
96 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | 101 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, |
97 | req->nbytes, req->info, | 102 | req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0); |
98 | get_dynamic_sa_iv_size(ctx)); | ||
99 | } | 103 | } |
100 | 104 | ||
101 | /** | 105 | /** |
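One hedged aside on the rewritten crypto4xx_encrypt()/crypto4xx_decrypt() above: "__le32 iv[ivlen]" is a variable-length array. An equivalent with a fixed bound (AES IVs in this driver never exceed 16 bytes) could stage the IV as in the sketch below; it assumes it lives inside the driver where crypto4xx_memcpy_to_le32() is visible, and the AES_IV_SIZE value is illustrative:

    #include <linux/bug.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define AES_IV_SIZE 16          /* as defined by the driver */

    /* bounded replacement for the VLA-based IV staging above */
    static int demo_stage_iv(__le32 iv[AES_IV_SIZE / 4],
                             const void *info, unsigned int ivlen)
    {
            if (WARN_ON(ivlen > AES_IV_SIZE))
                    return -EINVAL;
            if (ivlen)
                    crypto4xx_memcpy_to_le32(iv, info, ivlen);
            return 0;
    }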
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, | |||
120 | } | 124 | } |
121 | 125 | ||
122 | /* Create SA */ | 126 | /* Create SA */ |
123 | if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) | 127 | if (ctx->sa_in || ctx->sa_out) |
124 | crypto4xx_free_sa(ctx); | 128 | crypto4xx_free_sa(ctx); |
125 | 129 | ||
126 | rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); | 130 | rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); |
127 | if (rc) | 131 | if (rc) |
128 | return rc; | 132 | return rc; |
129 | 133 | ||
130 | if (ctx->state_record_dma_addr == 0) { | ||
131 | rc = crypto4xx_alloc_state_record(ctx); | ||
132 | if (rc) { | ||
133 | crypto4xx_free_sa(ctx); | ||
134 | return rc; | ||
135 | } | ||
136 | } | ||
137 | /* Setup SA */ | 134 | /* Setup SA */ |
138 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 135 | sa = ctx->sa_in; |
139 | ctx->hash_final = 0; | ||
140 | 136 | ||
141 | set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, | 137 | set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, |
142 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, | 138 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, |
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, | |||
150 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, | 146 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, |
151 | SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, | 147 | SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, |
152 | SA_NOT_COPY_HDR); | 148 | SA_NOT_COPY_HDR); |
153 | crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx), | 149 | crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), |
154 | key, keylen); | 150 | key, keylen); |
155 | sa->sa_contents = SA_AES_CONTENTS | (keylen << 2); | 151 | sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2); |
156 | sa->sa_command_1.bf.key_len = keylen >> 3; | 152 | sa->sa_command_1.bf.key_len = keylen >> 3; |
157 | ctx->is_hash = 0; | ||
158 | ctx->direction = DIR_INBOUND; | ||
159 | memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx), | ||
160 | (void *)&ctx->state_record_dma_addr, 4); | ||
161 | ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); | ||
162 | 153 | ||
163 | memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); | 154 | memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); |
164 | sa = (struct dynamic_sa_ctl *) ctx->sa_out; | 155 | sa = ctx->sa_out; |
165 | sa->sa_command_0.bf.dir = DIR_OUTBOUND; | 156 | sa->sa_command_0.bf.dir = DIR_OUTBOUND; |
166 | 157 | ||
167 | return 0; | 158 | return 0; |
@@ -174,6 +165,396 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, | |||
174 | CRYPTO_FEEDBACK_MODE_NO_FB); | 165 | CRYPTO_FEEDBACK_MODE_NO_FB); |
175 | } | 166 | } |
176 | 167 | ||
168 | int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, | ||
169 | const u8 *key, unsigned int keylen) | ||
170 | { | ||
171 | return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB, | ||
172 | CRYPTO_FEEDBACK_MODE_128BIT_CFB); | ||
173 | } | ||
174 | |||
175 | int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher, | ||
176 | const u8 *key, unsigned int keylen) | ||
177 | { | ||
178 | return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB, | ||
179 | CRYPTO_FEEDBACK_MODE_NO_FB); | ||
180 | } | ||
181 | |||
182 | int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, | ||
183 | const u8 *key, unsigned int keylen) | ||
184 | { | ||
185 | return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB, | ||
186 | CRYPTO_FEEDBACK_MODE_64BIT_OFB); | ||
187 | } | ||
188 | |||
189 | int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, | ||
190 | const u8 *key, unsigned int keylen) | ||
191 | { | ||
192 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
193 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
194 | int rc; | ||
195 | |||
196 | rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE, | ||
197 | CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB); | ||
198 | if (rc) | ||
199 | return rc; | ||
200 | |||
201 | ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen - | ||
202 | CTR_RFC3686_NONCE_SIZE]); | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req) | ||
208 | { | ||
209 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
210 | __le32 iv[AES_IV_SIZE / 4] = { | ||
211 | ctx->iv_nonce, | ||
212 | cpu_to_le32p((u32 *) req->info), | ||
213 | cpu_to_le32p((u32 *) (req->info + 4)), | ||
214 | cpu_to_le32(1) }; | ||
215 | |||
216 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | ||
217 | req->nbytes, iv, AES_IV_SIZE, | ||
218 | ctx->sa_out, ctx->sa_len, 0); | ||
219 | } | ||
220 | |||
221 | int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req) | ||
222 | { | ||
223 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
224 | __le32 iv[AES_IV_SIZE / 4] = { | ||
225 | ctx->iv_nonce, | ||
226 | cpu_to_le32p((u32 *) req->info), | ||
227 | cpu_to_le32p((u32 *) (req->info + 4)), | ||
228 | cpu_to_le32(1) }; | ||
229 | |||
230 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | ||
231 | req->nbytes, iv, AES_IV_SIZE, | ||
232 | ctx->sa_out, ctx->sa_len, 0); | ||
233 | } | ||
234 | |||
235 | static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, | ||
236 | bool is_ccm, bool decrypt) | ||
237 | { | ||
238 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
239 | |||
240 | /* authsize has to be a multiple of 4 */ | ||
241 | if (aead->authsize & 3) | ||
242 | return true; | ||
243 | |||
244 | /* | ||
245 | * hardware does not handle cases where cryptlen | ||
246 | * is less than a block | ||
247 | */ | ||
248 | if (req->cryptlen < AES_BLOCK_SIZE) | ||
249 | return true; | ||
250 | |||
251 | /* assoc len needs to be a multiple of 4 */ | ||
252 | if (req->assoclen & 0x3) | ||
253 | return true; | ||
254 | |||
255 | /* CCM supports only counter field length of 2 and 4 bytes */ | ||
256 | if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3)) | ||
257 | return true; | ||
258 | |||
259 | /* CCM - fix CBC MAC mismatch in special case */ | ||
260 | if (is_ccm && decrypt && !req->assoclen) | ||
261 | return true; | ||
262 | |||
263 | return false; | ||
264 | } | ||
265 | |||
266 | static int crypto4xx_aead_fallback(struct aead_request *req, | ||
267 | struct crypto4xx_ctx *ctx, bool do_decrypt) | ||
268 | { | ||
269 | char aead_req_data[sizeof(struct aead_request) + | ||
270 | crypto_aead_reqsize(ctx->sw_cipher.aead)] | ||
271 | __aligned(__alignof__(struct aead_request)); | ||
272 | |||
273 | struct aead_request *subreq = (void *) aead_req_data; | ||
274 | |||
275 | memset(subreq, 0, sizeof(aead_req_data)); | ||
276 | |||
277 | aead_request_set_tfm(subreq, ctx->sw_cipher.aead); | ||
278 | aead_request_set_callback(subreq, req->base.flags, | ||
279 | req->base.complete, req->base.data); | ||
280 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | ||
281 | req->iv); | ||
282 | aead_request_set_ad(subreq, req->assoclen); | ||
283 | return do_decrypt ? crypto_aead_decrypt(subreq) : | ||
284 | crypto_aead_encrypt(subreq); | ||
285 | } | ||
286 | |||
287 | static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx, | ||
288 | struct crypto_aead *cipher, | ||
289 | const u8 *key, | ||
290 | unsigned int keylen) | ||
291 | { | ||
292 | int rc; | ||
293 | |||
294 | crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK); | ||
295 | crypto_aead_set_flags(ctx->sw_cipher.aead, | ||
296 | crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); | ||
297 | rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); | ||
298 | crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK); | ||
299 | crypto_aead_set_flags(cipher, | ||
300 | crypto_aead_get_flags(ctx->sw_cipher.aead) & | ||
301 | CRYPTO_TFM_RES_MASK); | ||
302 | |||
303 | return rc; | ||
304 | } | ||
305 | |||
306 | /** | ||
307 | * AES-CCM Functions | ||
308 | */ | ||
309 | |||
310 | int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key, | ||
311 | unsigned int keylen) | ||
312 | { | ||
313 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | ||
314 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
315 | struct dynamic_sa_ctl *sa; | ||
316 | int rc = 0; | ||
317 | |||
318 | rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); | ||
319 | if (rc) | ||
320 | return rc; | ||
321 | |||
322 | if (ctx->sa_in || ctx->sa_out) | ||
323 | crypto4xx_free_sa(ctx); | ||
324 | |||
325 | rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4); | ||
326 | if (rc) | ||
327 | return rc; | ||
328 | |||
329 | /* Setup SA */ | ||
330 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | ||
331 | sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2); | ||
332 | |||
333 | set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, | ||
334 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, | ||
335 | SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, | ||
336 | SA_CIPHER_ALG_AES, | ||
337 | SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, | ||
338 | SA_OPCODE_HASH_DECRYPT, DIR_INBOUND); | ||
339 | |||
340 | set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, | ||
341 | CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, | ||
342 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, | ||
343 | SA_NOT_COPY_PAD, SA_COPY_PAYLOAD, | ||
344 | SA_NOT_COPY_HDR); | ||
345 | |||
346 | sa->sa_command_1.bf.key_len = keylen >> 3; | ||
347 | |||
348 | crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen); | ||
349 | |||
350 | memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); | ||
351 | sa = (struct dynamic_sa_ctl *) ctx->sa_out; | ||
352 | |||
353 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, | ||
354 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, | ||
355 | SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, | ||
356 | SA_CIPHER_ALG_AES, | ||
357 | SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, | ||
358 | SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND); | ||
359 | |||
360 | set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, | ||
361 | CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, | ||
362 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, | ||
363 | SA_COPY_PAD, SA_COPY_PAYLOAD, | ||
364 | SA_NOT_COPY_HDR); | ||
365 | |||
366 | sa->sa_command_1.bf.key_len = keylen >> 3; | ||
367 | return 0; | ||
368 | } | ||
369 | |||
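Note how the SA allocation above scales with the key: SA_AES128_CCM_LEN covers the 16-byte-key case, and each additional 4 key bytes costs one more 32-bit SA word. A quick illustrative check (base_len stands in for SA_AES128_CCM_LEN, whose real value lives in crypto4xx_sa.h):

    /* Illustrative: SA length in 32-bit words, as computed above.
     * Adds 2 words for AES-192 (keylen 24) and 4 for AES-256 (keylen 32). */
    static unsigned int ccm_sa_len(unsigned int base_len, unsigned int keylen)
    {
            return base_len + (keylen - 16) / 4;
    }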
370 | static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) | ||
371 | { | ||
372 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
373 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
374 | unsigned int len = req->cryptlen; | ||
375 | __le32 iv[16]; | ||
376 | u32 tmp_sa[ctx->sa_len * 4]; | ||
377 | struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; | ||
378 | |||
379 | if (crypto4xx_aead_need_fallback(req, true, decrypt)) | ||
380 | return crypto4xx_aead_fallback(req, ctx, decrypt); | ||
381 | |||
382 | if (decrypt) | ||
383 | len -= crypto_aead_authsize(aead); | ||
384 | |||
385 | memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa)); | ||
386 | sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2; | ||
387 | |||
388 | if (req->iv[0] == 1) { | ||
389 | /* CRYPTO_MODE_AES_ICM */ | ||
390 | sa->sa_command_1.bf.crypto_mode9_8 = 1; | ||
391 | } | ||
392 | |||
393 | iv[3] = cpu_to_le32(0); | ||
394 | crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1)); | ||
395 | |||
396 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | ||
397 | len, iv, sizeof(iv), | ||
398 | sa, ctx->sa_len, req->assoclen); | ||
399 | } | ||
400 | |||
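The 16 - (req->iv[0] + 1) copy above picks up the CCM flags byte plus the 15 - L nonce bytes and leaves the trailing L counter bytes zero, which is why iv[3] is cleared beforehand. A worked example of that arithmetic, assuming the common iv[0] == 3 (L = 4) case:

    /* Illustrative: bytes of req->iv copied into the A_0 counter block.
     * For iv[0] == 3 (4-byte counter field):
     *   16 - (3 + 1) = 12 = 1 flags byte + 11 nonce bytes,
     * leaving the last 4 bytes zeroed to serve as the block counter. */
    static unsigned int ccm_a0_copy_len(unsigned char iv0)
    {
            return 16u - ((unsigned int)iv0 + 1u);
    }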
401 | int crypto4xx_encrypt_aes_ccm(struct aead_request *req) | ||
402 | { | ||
403 | return crypto4xx_crypt_aes_ccm(req, false); | ||
404 | } | ||
405 | |||
406 | int crypto4xx_decrypt_aes_ccm(struct aead_request *req) | ||
407 | { | ||
408 | return crypto4xx_crypt_aes_ccm(req, true); | ||
409 | } | ||
410 | |||
411 | int crypto4xx_setauthsize_aead(struct crypto_aead *cipher, | ||
412 | unsigned int authsize) | ||
413 | { | ||
414 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | ||
415 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
416 | |||
417 | return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize); | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * AES-GCM Functions | ||
422 | */ | ||
423 | |||
424 | static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen) | ||
425 | { | ||
426 | switch (keylen) { | ||
427 | case 16: | ||
428 | case 24: | ||
429 | case 32: | ||
430 | return 0; | ||
431 | default: | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | } | ||
435 | |||
436 | static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, | ||
437 | unsigned int keylen) | ||
438 | { | ||
439 | struct crypto_cipher *aes_tfm = NULL; | ||
440 | uint8_t src[16] = { 0 }; | ||
441 | int rc = 0; | ||
442 | |||
443 | aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC | | ||
444 | CRYPTO_ALG_NEED_FALLBACK); | ||
445 | if (IS_ERR(aes_tfm)) { | ||
446 | rc = PTR_ERR(aes_tfm); | ||
447 | pr_warn("could not load aes cipher driver: %d\n", rc); | ||
448 | return rc; | ||
449 | } | ||
450 | |||
451 | rc = crypto_cipher_setkey(aes_tfm, key, keylen); | ||
452 | if (rc) { | ||
453 | pr_err("setkey() failed: %d\n", rc); | ||
454 | goto out; | ||
455 | } | ||
456 | |||
457 | crypto_cipher_encrypt_one(aes_tfm, src, src); | ||
458 | crypto4xx_memcpy_to_le32(hash_start, src, 16); | ||
459 | out: | ||
460 | crypto_free_cipher(aes_tfm); | ||
461 | return rc; | ||
462 | } | ||
463 | |||
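For context, the zero-block encryption above is the standard GHASH hash-subkey derivation from NIST SP 800-38D; the engine only needs H preloaded into the SA's inner digest and evaluates the GHASH recurrence itself:

    H = \mathrm{CIPH}_K(0^{128}), \qquad Y_0 = 0^{128}, \quad Y_i = (Y_{i-1} \oplus X_i) \cdot H \ \text{in } \mathrm{GF}(2^{128})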
464 | int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, | ||
465 | const u8 *key, unsigned int keylen) | ||
466 | { | ||
467 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | ||
468 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
469 | struct dynamic_sa_ctl *sa; | ||
470 | int rc = 0; | ||
471 | |||
472 | if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) { | ||
473 | crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
474 | return -EINVAL; | ||
475 | } | ||
476 | |||
477 | rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); | ||
478 | if (rc) | ||
479 | return rc; | ||
480 | |||
481 | if (ctx->sa_in || ctx->sa_out) | ||
482 | crypto4xx_free_sa(ctx); | ||
483 | |||
484 | rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4); | ||
485 | if (rc) | ||
486 | return rc; | ||
487 | |||
488 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | ||
489 | |||
490 | sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2); | ||
491 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, | ||
492 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, | ||
493 | SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH, | ||
494 | SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, | ||
495 | SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT, | ||
496 | DIR_INBOUND); | ||
497 | set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, | ||
498 | CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, | ||
499 | SA_SEQ_MASK_ON, SA_MC_DISABLE, | ||
500 | SA_NOT_COPY_PAD, SA_COPY_PAYLOAD, | ||
501 | SA_NOT_COPY_HDR); | ||
502 | |||
503 | sa->sa_command_1.bf.key_len = keylen >> 3; | ||
504 | |||
505 | crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), | ||
506 | key, keylen); | ||
507 | |||
508 | rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa), | ||
509 | key, keylen); | ||
510 | if (rc) { | ||
511 | pr_err("GCM hash key setting failed = %d\n", rc); | ||
512 | goto err; | ||
513 | } | ||
514 | |||
515 | memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); | ||
516 | sa = (struct dynamic_sa_ctl *) ctx->sa_out; | ||
517 | sa->sa_command_0.bf.dir = DIR_OUTBOUND; | ||
518 | sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH; | ||
519 | |||
520 | return 0; | ||
521 | err: | ||
522 | crypto4xx_free_sa(ctx); | ||
523 | return rc; | ||
524 | } | ||
525 | |||
526 | static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req, | ||
527 | bool decrypt) | ||
528 | { | ||
529 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
530 | unsigned int len = req->cryptlen; | ||
531 | __le32 iv[4]; | ||
532 | |||
533 | if (crypto4xx_aead_need_fallback(req, false, decrypt)) | ||
534 | return crypto4xx_aead_fallback(req, ctx, decrypt); | ||
535 | |||
536 | crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE); | ||
537 | iv[3] = cpu_to_le32(1); | ||
538 | |||
539 | if (decrypt) | ||
540 | len -= crypto_aead_authsize(crypto_aead_reqtfm(req)); | ||
541 | |||
542 | return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, | ||
543 | len, iv, sizeof(iv), | ||
544 | decrypt ? ctx->sa_in : ctx->sa_out, | ||
545 | ctx->sa_len, req->assoclen); | ||
546 | } | ||
547 | |||
548 | int crypto4xx_encrypt_aes_gcm(struct aead_request *req) | ||
549 | { | ||
550 | return crypto4xx_crypt_aes_gcm(req, false); | ||
551 | } | ||
552 | |||
553 | int crypto4xx_decrypt_aes_gcm(struct aead_request *req) | ||
554 | { | ||
555 | return crypto4xx_crypt_aes_gcm(req, true); | ||
556 | } | ||
557 | |||
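The iv[3] = cpu_to_le32(1) assignment above builds the 96-bit-IV form of GCM's pre-counter block J0 = IV || 0^31 || 1; the hardware increments the counter from there. A standalone sketch of that layout (illustrative; GCM_AES_IV_SIZE is 12, as defined in <crypto/gcm.h>):

    #include <string.h>

    #define GCM_AES_IV_SIZE 12 /* 96-bit IV, matching <crypto/gcm.h> */

    /* Illustrative: the 16-byte J0 block that iv[0..3] above encodes. */
    static void gcm_j0(unsigned char out[16],
                       const unsigned char iv[GCM_AES_IV_SIZE])
    {
            memcpy(out, iv, GCM_AES_IV_SIZE); /* bytes 0..11: the IV itself */
            memset(out + 12, 0, 3);           /* bytes 12..14: zero padding */
            out[15] = 1;                      /* byte 15: initial counter = 1 */
    }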
177 | /** | 558 | /** |
178 | * HASH SHA1 Functions | 559 | * HASH SHA1 Functions |
179 | */ | 560 | */ |
@@ -183,53 +564,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, | |||
183 | unsigned char hm) | 564 | unsigned char hm) |
184 | { | 565 | { |
185 | struct crypto_alg *alg = tfm->__crt_alg; | 566 | struct crypto_alg *alg = tfm->__crt_alg; |
186 | struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg); | 567 | struct crypto4xx_alg *my_alg; |
187 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | 568 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); |
188 | struct dynamic_sa_ctl *sa; | 569 | struct dynamic_sa_hash160 *sa; |
189 | struct dynamic_sa_hash160 *sa_in; | ||
190 | int rc; | 570 | int rc; |
191 | 571 | ||
572 | my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg, | ||
573 | alg.u.hash); | ||
192 | ctx->dev = my_alg->dev; | 574 | ctx->dev = my_alg->dev; |
193 | ctx->is_hash = 1; | ||
194 | ctx->hash_final = 0; | ||
195 | 575 | ||
196 | /* Create SA */ | 576 | /* Create SA */ |
197 | if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) | 577 | if (ctx->sa_in || ctx->sa_out) |
198 | crypto4xx_free_sa(ctx); | 578 | crypto4xx_free_sa(ctx); |
199 | 579 | ||
200 | rc = crypto4xx_alloc_sa(ctx, sa_len); | 580 | rc = crypto4xx_alloc_sa(ctx, sa_len); |
201 | if (rc) | 581 | if (rc) |
202 | return rc; | 582 | return rc; |
203 | 583 | ||
204 | if (ctx->state_record_dma_addr == 0) { | ||
205 | crypto4xx_alloc_state_record(ctx); | ||
206 | if (!ctx->state_record_dma_addr) { | ||
207 | crypto4xx_free_sa(ctx); | ||
208 | return -ENOMEM; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 584 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
213 | sizeof(struct crypto4xx_ctx)); | 585 | sizeof(struct crypto4xx_ctx)); |
214 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 586 | sa = (struct dynamic_sa_hash160 *)ctx->sa_in; |
215 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, | 587 | set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV, |
216 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, | 588 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, |
217 | SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, | 589 | SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, |
218 | SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, | 590 | SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, |
219 | SA_OPCODE_HASH, DIR_INBOUND); | 591 | SA_OPCODE_HASH, DIR_INBOUND); |
220 | set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH, | 592 | set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH, |
221 | CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, | 593 | CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, |
222 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, | 594 | SA_SEQ_MASK_OFF, SA_MC_ENABLE, |
223 | SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, | 595 | SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, |
224 | SA_NOT_COPY_HDR); | 596 | SA_NOT_COPY_HDR); |
225 | ctx->direction = DIR_INBOUND; | ||
226 | sa->sa_contents = SA_HASH160_CONTENTS; | ||
227 | sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in; | ||
228 | /* Need to zero hash digest in SA */ | 597 | /* Need to zero hash digest in SA */ |
229 | memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest)); | 598 | memset(sa->inner_digest, 0, sizeof(sa->inner_digest)); |
230 | memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest)); | 599 | memset(sa->outer_digest, 0, sizeof(sa->outer_digest)); |
231 | sa_in->state_ptr = ctx->state_record_dma_addr; | ||
232 | ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); | ||
233 | 600 | ||
234 | return 0; | 601 | return 0; |
235 | } | 602 | } |
@@ -240,29 +607,27 @@ int crypto4xx_hash_init(struct ahash_request *req) | |||
240 | int ds; | 607 | int ds; |
241 | struct dynamic_sa_ctl *sa; | 608 | struct dynamic_sa_ctl *sa; |
242 | 609 | ||
243 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 610 | sa = ctx->sa_in; |
244 | ds = crypto_ahash_digestsize( | 611 | ds = crypto_ahash_digestsize( |
245 | __crypto_ahash_cast(req->base.tfm)); | 612 | __crypto_ahash_cast(req->base.tfm)); |
246 | sa->sa_command_0.bf.digest_len = ds >> 2; | 613 | sa->sa_command_0.bf.digest_len = ds >> 2; |
247 | sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; | 614 | sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; |
248 | ctx->is_hash = 1; | ||
249 | ctx->direction = DIR_INBOUND; | ||
250 | 615 | ||
251 | return 0; | 616 | return 0; |
252 | } | 617 | } |
253 | 618 | ||
254 | int crypto4xx_hash_update(struct ahash_request *req) | 619 | int crypto4xx_hash_update(struct ahash_request *req) |
255 | { | 620 | { |
621 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
256 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 622 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
623 | struct scatterlist dst; | ||
624 | unsigned int ds = crypto_ahash_digestsize(ahash); | ||
257 | 625 | ||
258 | ctx->is_hash = 1; | 626 | sg_init_one(&dst, req->result, ds); |
259 | ctx->hash_final = 0; | ||
260 | ctx->pd_ctl = 0x11; | ||
261 | ctx->direction = DIR_INBOUND; | ||
262 | 627 | ||
263 | return crypto4xx_build_pd(&req->base, ctx, req->src, | 628 | return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, |
264 | (struct scatterlist *) req->result, | 629 | req->nbytes, NULL, 0, ctx->sa_in, |
265 | req->nbytes, NULL, 0); | 630 | ctx->sa_len, 0); |
266 | } | 631 | } |
267 | 632 | ||
268 | int crypto4xx_hash_final(struct ahash_request *req) | 633 | int crypto4xx_hash_final(struct ahash_request *req) |
@@ -272,15 +637,16 @@ int crypto4xx_hash_final(struct ahash_request *req) | |||
272 | 637 | ||
273 | int crypto4xx_hash_digest(struct ahash_request *req) | 638 | int crypto4xx_hash_digest(struct ahash_request *req) |
274 | { | 639 | { |
640 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
275 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 641 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
642 | struct scatterlist dst; | ||
643 | unsigned int ds = crypto_ahash_digestsize(ahash); | ||
276 | 644 | ||
277 | ctx->hash_final = 1; | 645 | sg_init_one(&dst, req->result, ds); |
278 | ctx->pd_ctl = 0x11; | ||
279 | ctx->direction = DIR_INBOUND; | ||
280 | 646 | ||
281 | return crypto4xx_build_pd(&req->base, ctx, req->src, | 647 | return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, |
282 | (struct scatterlist *) req->result, | 648 | req->nbytes, NULL, 0, ctx->sa_in, |
283 | req->nbytes, NULL, 0); | 649 | ctx->sa_len, 0); |
284 | } | 650 | } |
285 | 651 | ||
286 | /** | 652 | /** |
@@ -291,5 +657,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) | |||
291 | return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, | 657 | return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, |
292 | SA_HASH_MODE_HASH); | 658 | SA_HASH_MODE_HASH); |
293 | } | 659 | } |
294 | |||
295 | |||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 65dc78b91dea..c44954e274bc 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -35,8 +35,14 @@ | |||
35 | #include <asm/dcr.h> | 35 | #include <asm/dcr.h> |
36 | #include <asm/dcr-regs.h> | 36 | #include <asm/dcr-regs.h> |
37 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
38 | #include <crypto/aead.h> | ||
38 | #include <crypto/aes.h> | 39 | #include <crypto/aes.h> |
40 | #include <crypto/ctr.h> | ||
41 | #include <crypto/gcm.h> | ||
39 | #include <crypto/sha.h> | 42 | #include <crypto/sha.h> |
43 | #include <crypto/scatterwalk.h> | ||
44 | #include <crypto/internal/aead.h> | ||
45 | #include <crypto/internal/skcipher.h> | ||
40 | #include "crypto4xx_reg_def.h" | 46 | #include "crypto4xx_reg_def.h" |
41 | #include "crypto4xx_core.h" | 47 | #include "crypto4xx_core.h" |
42 | #include "crypto4xx_sa.h" | 48 | #include "crypto4xx_sa.h" |
@@ -127,21 +133,17 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev) | |||
127 | 133 | ||
128 | int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) | 134 | int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) |
129 | { | 135 | { |
130 | ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, | 136 | ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC); |
131 | &ctx->sa_in_dma_addr, GFP_ATOMIC); | ||
132 | if (ctx->sa_in == NULL) | 137 | if (ctx->sa_in == NULL) |
133 | return -ENOMEM; | 138 | return -ENOMEM; |
134 | 139 | ||
135 | ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, | 140 | ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC); |
136 | &ctx->sa_out_dma_addr, GFP_ATOMIC); | ||
137 | if (ctx->sa_out == NULL) { | 141 | if (ctx->sa_out == NULL) { |
138 | dma_free_coherent(ctx->dev->core_dev->device, size * 4, | 142 | kfree(ctx->sa_in); |
139 | ctx->sa_in, ctx->sa_in_dma_addr); | 143 | ctx->sa_in = NULL; |
140 | return -ENOMEM; | 144 | return -ENOMEM; |
141 | } | 145 | } |
142 | 146 | ||
143 | memset(ctx->sa_in, 0, size * 4); | ||
144 | memset(ctx->sa_out, 0, size * 4); | ||
145 | ctx->sa_len = size; | 147 | ctx->sa_len = size; |
146 | 148 | ||
147 | return 0; | 149 | return 0; |
@@ -149,40 +151,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) | |||
149 | 151 | ||
150 | void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) | 152 | void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) |
151 | { | 153 | { |
152 | if (ctx->sa_in != NULL) | 154 | kfree(ctx->sa_in); |
153 | dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, | 155 | ctx->sa_in = NULL; |
154 | ctx->sa_in, ctx->sa_in_dma_addr); | 156 | kfree(ctx->sa_out); |
155 | if (ctx->sa_out != NULL) | 157 | ctx->sa_out = NULL; |
156 | dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, | ||
157 | ctx->sa_out, ctx->sa_out_dma_addr); | ||
158 | |||
159 | ctx->sa_in_dma_addr = 0; | ||
160 | ctx->sa_out_dma_addr = 0; | ||
161 | ctx->sa_len = 0; | 158 | ctx->sa_len = 0; |
162 | } | 159 | } |
163 | 160 | ||
164 | u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx) | ||
165 | { | ||
166 | ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device, | ||
167 | sizeof(struct sa_state_record), | ||
168 | &ctx->state_record_dma_addr, GFP_ATOMIC); | ||
169 | if (!ctx->state_record_dma_addr) | ||
170 | return -ENOMEM; | ||
171 | memset(ctx->state_record, 0, sizeof(struct sa_state_record)); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx) | ||
177 | { | ||
178 | if (ctx->state_record != NULL) | ||
179 | dma_free_coherent(ctx->dev->core_dev->device, | ||
180 | sizeof(struct sa_state_record), | ||
181 | ctx->state_record, | ||
182 | ctx->state_record_dma_addr); | ||
183 | ctx->state_record_dma_addr = 0; | ||
184 | } | ||
185 | |||
186 | /** | 161 | /** |
187 | * alloc memory for the gather ring | 162 | * alloc memory for the gather ring |
188 | * no need to alloc buf for the ring | 163 | * no need to alloc buf for the ring |
@@ -191,7 +166,6 @@ void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx) | |||
191 | static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) | 166 | static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) |
192 | { | 167 | { |
193 | int i; | 168 | int i; |
194 | struct pd_uinfo *pd_uinfo; | ||
195 | dev->pdr = dma_alloc_coherent(dev->core_dev->device, | 169 | dev->pdr = dma_alloc_coherent(dev->core_dev->device, |
196 | sizeof(struct ce_pd) * PPC4XX_NUM_PD, | 170 | sizeof(struct ce_pd) * PPC4XX_NUM_PD, |
197 | &dev->pdr_pa, GFP_ATOMIC); | 171 | &dev->pdr_pa, GFP_ATOMIC); |
@@ -207,9 +181,9 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) | |||
207 | dev->pdr_pa); | 181 | dev->pdr_pa); |
208 | return -ENOMEM; | 182 | return -ENOMEM; |
209 | } | 183 | } |
210 | memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); | 184 | memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); |
211 | dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, | 185 | dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, |
212 | 256 * PPC4XX_NUM_PD, | 186 | sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, |
213 | &dev->shadow_sa_pool_pa, | 187 | &dev->shadow_sa_pool_pa, |
214 | GFP_ATOMIC); | 188 | GFP_ATOMIC); |
215 | if (!dev->shadow_sa_pool) | 189 | if (!dev->shadow_sa_pool) |
@@ -221,16 +195,17 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) | |||
221 | if (!dev->shadow_sr_pool) | 195 | if (!dev->shadow_sr_pool) |
222 | return -ENOMEM; | 196 | return -ENOMEM; |
223 | for (i = 0; i < PPC4XX_NUM_PD; i++) { | 197 | for (i = 0; i < PPC4XX_NUM_PD; i++) { |
224 | pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo + | 198 | struct ce_pd *pd = &dev->pdr[i]; |
225 | sizeof(struct pd_uinfo) * i); | 199 | struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i]; |
200 | |||
201 | pd->sa = dev->shadow_sa_pool_pa + | ||
202 | sizeof(union shadow_sa_buf) * i; | ||
226 | 203 | ||
227 | /* alloc 256 bytes, which is enough for any kind of dynamic sa */ | 204 | /* alloc 256 bytes, which is enough for any kind of dynamic sa */
228 | pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i; | 205 | pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa; |
229 | pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i; | ||
230 | 206 | ||
231 | /* alloc state record */ | 207 | /* alloc state record */ |
232 | pd_uinfo->sr_va = dev->shadow_sr_pool + | 208 | pd_uinfo->sr_va = &dev->shadow_sr_pool[i]; |
233 | sizeof(struct sa_state_record) * i; | ||
234 | pd_uinfo->sr_pa = dev->shadow_sr_pool_pa + | 209 | pd_uinfo->sr_pa = dev->shadow_sr_pool_pa + |
235 | sizeof(struct sa_state_record) * i; | 210 | sizeof(struct sa_state_record) * i; |
236 | } | 211 | } |
@@ -240,13 +215,16 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) | |||
240 | 215 | ||
241 | static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) | 216 | static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) |
242 | { | 217 | { |
243 | if (dev->pdr != NULL) | 218 | if (dev->pdr) |
244 | dma_free_coherent(dev->core_dev->device, | 219 | dma_free_coherent(dev->core_dev->device, |
245 | sizeof(struct ce_pd) * PPC4XX_NUM_PD, | 220 | sizeof(struct ce_pd) * PPC4XX_NUM_PD, |
246 | dev->pdr, dev->pdr_pa); | 221 | dev->pdr, dev->pdr_pa); |
222 | |||
247 | if (dev->shadow_sa_pool) | 223 | if (dev->shadow_sa_pool) |
248 | dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, | 224 | dma_free_coherent(dev->core_dev->device, |
249 | dev->shadow_sa_pool, dev->shadow_sa_pool_pa); | 225 | sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, |
226 | dev->shadow_sa_pool, dev->shadow_sa_pool_pa); | ||
227 | |||
250 | if (dev->shadow_sr_pool) | 228 | if (dev->shadow_sr_pool) |
251 | dma_free_coherent(dev->core_dev->device, | 229 | dma_free_coherent(dev->core_dev->device, |
252 | sizeof(struct sa_state_record) * PPC4XX_NUM_PD, | 230 | sizeof(struct sa_state_record) * PPC4XX_NUM_PD, |
@@ -273,28 +251,21 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev) | |||
273 | 251 | ||
274 | static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) | 252 | static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) |
275 | { | 253 | { |
276 | struct pd_uinfo *pd_uinfo; | 254 | struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx]; |
255 | u32 tail; | ||
277 | unsigned long flags; | 256 | unsigned long flags; |
278 | 257 | ||
279 | pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + | ||
280 | sizeof(struct pd_uinfo) * idx); | ||
281 | spin_lock_irqsave(&dev->core_dev->lock, flags); | 258 | spin_lock_irqsave(&dev->core_dev->lock, flags); |
259 | pd_uinfo->state = PD_ENTRY_FREE; | ||
260 | |||
282 | if (dev->pdr_tail != PPC4XX_LAST_PD) | 261 | if (dev->pdr_tail != PPC4XX_LAST_PD) |
283 | dev->pdr_tail++; | 262 | dev->pdr_tail++; |
284 | else | 263 | else |
285 | dev->pdr_tail = 0; | 264 | dev->pdr_tail = 0; |
286 | pd_uinfo->state = PD_ENTRY_FREE; | 265 | tail = dev->pdr_tail; |
287 | spin_unlock_irqrestore(&dev->core_dev->lock, flags); | 266 | spin_unlock_irqrestore(&dev->core_dev->lock, flags); |
288 | 267 | ||
289 | return 0; | 268 | return tail; |
290 | } | ||
291 | |||
292 | static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev, | ||
293 | dma_addr_t *pd_dma, u32 idx) | ||
294 | { | ||
295 | *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx; | ||
296 | |||
297 | return dev->pdr + sizeof(struct ce_pd) * idx; | ||
298 | } | 269 | } |
299 | 270 | ||
300 | /** | 271 | /** |
@@ -326,10 +297,11 @@ static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev) | |||
326 | * when this function is called. | 297 | * when this function is called. |
327 | * preemption or interrupt must be disabled | 298 | * preemption or interrupt must be disabled |
328 | */ | 299 | */ |
329 | u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) | 300 | static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) |
330 | { | 301 | { |
331 | u32 retval; | 302 | u32 retval; |
332 | u32 tmp; | 303 | u32 tmp; |
304 | |||
333 | if (n >= PPC4XX_NUM_GD) | 305 | if (n >= PPC4XX_NUM_GD) |
334 | return ERING_WAS_FULL; | 306 | return ERING_WAS_FULL; |
335 | 307 | ||
@@ -372,7 +344,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, | |||
372 | { | 344 | { |
373 | *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx; | 345 | *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx; |
374 | 346 | ||
375 | return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx); | 347 | return &dev->gdr[idx]; |
376 | } | 348 | } |
377 | 349 | ||
378 | /** | 350 | /** |
@@ -383,7 +355,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, | |||
383 | static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) | 355 | static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) |
384 | { | 356 | { |
385 | int i; | 357 | int i; |
386 | struct ce_sd *sd_array; | ||
387 | 358 | ||
388 | /* alloc memory for scatter descriptor ring */ | 359 | /* alloc memory for scatter descriptor ring */ |
389 | dev->sdr = dma_alloc_coherent(dev->core_dev->device, | 360 | dev->sdr = dma_alloc_coherent(dev->core_dev->device, |
@@ -392,10 +363,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) | |||
392 | if (!dev->sdr) | 363 | if (!dev->sdr) |
393 | return -ENOMEM; | 364 | return -ENOMEM; |
394 | 365 | ||
395 | dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE; | ||
396 | dev->scatter_buffer_va = | 366 | dev->scatter_buffer_va = |
397 | dma_alloc_coherent(dev->core_dev->device, | 367 | dma_alloc_coherent(dev->core_dev->device, |
398 | dev->scatter_buffer_size * PPC4XX_NUM_SD, | 368 | PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, |
399 | &dev->scatter_buffer_pa, GFP_ATOMIC); | 369 | &dev->scatter_buffer_pa, GFP_ATOMIC); |
400 | if (!dev->scatter_buffer_va) { | 370 | if (!dev->scatter_buffer_va) { |
401 | dma_free_coherent(dev->core_dev->device, | 371 | dma_free_coherent(dev->core_dev->device, |
@@ -404,11 +374,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) | |||
404 | return -ENOMEM; | 374 | return -ENOMEM; |
405 | } | 375 | } |
406 | 376 | ||
407 | sd_array = dev->sdr; | ||
408 | |||
409 | for (i = 0; i < PPC4XX_NUM_SD; i++) { | 377 | for (i = 0; i < PPC4XX_NUM_SD; i++) { |
410 | sd_array[i].ptr = dev->scatter_buffer_pa + | 378 | dev->sdr[i].ptr = dev->scatter_buffer_pa + |
411 | dev->scatter_buffer_size * i; | 379 | PPC4XX_SD_BUFFER_SIZE * i; |
412 | } | 380 | } |
413 | 381 | ||
414 | return 0; | 382 | return 0; |
@@ -416,14 +384,14 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) | |||
416 | 384 | ||
417 | static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) | 385 | static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) |
418 | { | 386 | { |
419 | if (dev->sdr != NULL) | 387 | if (dev->sdr) |
420 | dma_free_coherent(dev->core_dev->device, | 388 | dma_free_coherent(dev->core_dev->device, |
421 | sizeof(struct ce_sd) * PPC4XX_NUM_SD, | 389 | sizeof(struct ce_sd) * PPC4XX_NUM_SD, |
422 | dev->sdr, dev->sdr_pa); | 390 | dev->sdr, dev->sdr_pa); |
423 | 391 | ||
424 | if (dev->scatter_buffer_va != NULL) | 392 | if (dev->scatter_buffer_va) |
425 | dma_free_coherent(dev->core_dev->device, | 393 | dma_free_coherent(dev->core_dev->device, |
426 | dev->scatter_buffer_size * PPC4XX_NUM_SD, | 394 | PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, |
427 | dev->scatter_buffer_va, | 395 | dev->scatter_buffer_va, |
428 | dev->scatter_buffer_pa); | 396 | dev->scatter_buffer_pa); |
429 | } | 397 | } |
@@ -477,63 +445,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev, | |||
477 | { | 445 | { |
478 | *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx; | 446 | *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx; |
479 | 447 | ||
480 | return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx); | 448 | return &dev->sdr[idx]; |
481 | } | ||
482 | |||
483 | static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev, | ||
484 | dma_addr_t *addr, u32 *length, | ||
485 | u32 *idx, u32 *offset, u32 *nbytes) | ||
486 | { | ||
487 | u32 len; | ||
488 | |||
489 | if (*length > dev->scatter_buffer_size) { | ||
490 | memcpy(phys_to_virt(*addr), | ||
491 | dev->scatter_buffer_va + | ||
492 | *idx * dev->scatter_buffer_size + *offset, | ||
493 | dev->scatter_buffer_size); | ||
494 | *offset = 0; | ||
495 | *length -= dev->scatter_buffer_size; | ||
496 | *nbytes -= dev->scatter_buffer_size; | ||
497 | if (*idx == PPC4XX_LAST_SD) | ||
498 | *idx = 0; | ||
499 | else | ||
500 | (*idx)++; | ||
501 | *addr = *addr + dev->scatter_buffer_size; | ||
502 | return 1; | ||
503 | } else if (*length < dev->scatter_buffer_size) { | ||
504 | memcpy(phys_to_virt(*addr), | ||
505 | dev->scatter_buffer_va + | ||
506 | *idx * dev->scatter_buffer_size + *offset, *length); | ||
507 | if ((*offset + *length) == dev->scatter_buffer_size) { | ||
508 | if (*idx == PPC4XX_LAST_SD) | ||
509 | *idx = 0; | ||
510 | else | ||
511 | (*idx)++; | ||
512 | *nbytes -= *length; | ||
513 | *offset = 0; | ||
514 | } else { | ||
515 | *nbytes -= *length; | ||
516 | *offset += *length; | ||
517 | } | ||
518 | |||
519 | return 0; | ||
520 | } else { | ||
521 | len = (*nbytes <= dev->scatter_buffer_size) ? | ||
522 | (*nbytes) : dev->scatter_buffer_size; | ||
523 | memcpy(phys_to_virt(*addr), | ||
524 | dev->scatter_buffer_va + | ||
525 | *idx * dev->scatter_buffer_size + *offset, | ||
526 | len); | ||
527 | *offset = 0; | ||
528 | *nbytes -= len; | ||
529 | |||
530 | if (*idx == PPC4XX_LAST_SD) | ||
531 | *idx = 0; | ||
532 | else | ||
533 | (*idx)++; | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | } | 449 | } |
538 | 450 | ||
539 | static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, | 451 | static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, |
@@ -542,66 +454,52 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, | |||
542 | u32 nbytes, | 454 | u32 nbytes, |
543 | struct scatterlist *dst) | 455 | struct scatterlist *dst) |
544 | { | 456 | { |
545 | dma_addr_t addr; | 457 | unsigned int first_sd = pd_uinfo->first_sd; |
546 | u32 this_sd; | 458 | unsigned int last_sd; |
547 | u32 offset; | 459 | unsigned int overflow = 0; |
548 | u32 len; | 460 | unsigned int to_copy; |
549 | u32 i; | 461 | unsigned int dst_start = 0; |
550 | u32 sg_len; | ||
551 | struct scatterlist *sg; | ||
552 | 462 | ||
553 | this_sd = pd_uinfo->first_sd; | 463 | /* |
554 | offset = 0; | 464 | * Because the scatter buffers are all neatly organized in one |
555 | i = 0; | 465 | * big continuous ringbuffer; scatterwalk_map_and_copy() can |
466 | * be instructed to copy a range of buffers in one go. | ||
467 | */ | ||
468 | |||
469 | last_sd = (first_sd + pd_uinfo->num_sd); | ||
470 | if (last_sd > PPC4XX_LAST_SD) { | ||
471 | last_sd = PPC4XX_LAST_SD; | ||
472 | overflow = last_sd % PPC4XX_NUM_SD; | ||
473 | } | ||
556 | 474 | ||
557 | while (nbytes) { | 475 | while (nbytes) { |
558 | sg = &dst[i]; | 476 | void *buf = dev->scatter_buffer_va + |
559 | sg_len = sg->length; | 477 | first_sd * PPC4XX_SD_BUFFER_SIZE; |
560 | addr = dma_map_page(dev->core_dev->device, sg_page(sg), | 478 | |
561 | sg->offset, sg->length, DMA_TO_DEVICE); | 479 | to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE * |
562 | 480 | (1 + last_sd - first_sd)); | |
563 | if (offset == 0) { | 481 | scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1); |
564 | len = (nbytes <= sg->length) ? nbytes : sg->length; | 482 | nbytes -= to_copy; |
565 | while (crypto4xx_fill_one_page(dev, &addr, &len, | 483 | |
566 | &this_sd, &offset, &nbytes)) | 484 | if (overflow) { |
567 | ; | 485 | first_sd = 0; |
568 | if (!nbytes) | 486 | last_sd = overflow; |
569 | return; | 487 | dst_start += to_copy; |
570 | i++; | 488 | overflow = 0; |
571 | } else { | ||
572 | len = (nbytes <= (dev->scatter_buffer_size - offset)) ? | ||
573 | nbytes : (dev->scatter_buffer_size - offset); | ||
574 | len = (sg->length < len) ? sg->length : len; | ||
575 | while (crypto4xx_fill_one_page(dev, &addr, &len, | ||
576 | &this_sd, &offset, &nbytes)) | ||
577 | ; | ||
578 | if (!nbytes) | ||
579 | return; | ||
580 | sg_len -= len; | ||
581 | if (sg_len) { | ||
582 | addr += len; | ||
583 | while (crypto4xx_fill_one_page(dev, &addr, | ||
584 | &sg_len, &this_sd, &offset, &nbytes)) | ||
585 | ; | ||
586 | } | ||
587 | i++; | ||
588 | } | 489 | } |
589 | } | 490 | } |
590 | } | 491 | } |
591 | 492 | ||
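The rewrite above leans on the fact that all scatter buffers sit back-to-back in one DMA allocation, so a run of descriptors can be drained with a single scatterwalk_map_and_copy(); only a wrap past the last descriptor forces a second pass. A simplified userspace model of that wrap handling, with the scatterwalk replaced by plain memcpy() into a flat destination (sizes and names are illustrative stand-ins):

    #include <stddef.h>
    #include <string.h>

    #define SLOT_SIZE 2048u /* stand-in for PPC4XX_SD_BUFFER_SIZE */
    #define NUM_SLOTS 256u  /* stand-in for PPC4XX_NUM_SD */

    /* Drain a contiguous ring of equally sized slots into a flat buffer,
     * splitting the copy in (at most) two pieces where the ring wraps. */
    static void ring_copy(unsigned char *dst, const unsigned char *ring,
                          unsigned int first_slot, size_t nbytes)
    {
            size_t until_wrap = (size_t)(NUM_SLOTS - first_slot) * SLOT_SIZE;
            size_t head = nbytes < until_wrap ? nbytes : until_wrap;

            memcpy(dst, ring + (size_t)first_slot * SLOT_SIZE, head);
            if (nbytes > head) /* wrapped: continue from slot 0 */
                    memcpy(dst + head, ring, nbytes - head);
    }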
592 | static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo, | 493 | static void crypto4xx_copy_digest_to_dst(void *dst, |
494 | struct pd_uinfo *pd_uinfo, | ||
593 | struct crypto4xx_ctx *ctx) | 495 | struct crypto4xx_ctx *ctx) |
594 | { | 496 | { |
595 | struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 497 | struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; |
596 | struct sa_state_record *state_record = | ||
597 | (struct sa_state_record *) pd_uinfo->sr_va; | ||
598 | 498 | ||
599 | if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { | 499 | if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { |
600 | memcpy((void *) pd_uinfo->dest_va, state_record->save_digest, | 500 | memcpy(dst, pd_uinfo->sr_va->save_digest, |
601 | SA_HASH_ALG_SHA1_DIGEST_SIZE); | 501 | SA_HASH_ALG_SHA1_DIGEST_SIZE); |
602 | } | 502 | } |
603 | |||
604 | return 0; | ||
605 | } | 503 | } |
606 | 504 | ||
607 | static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, | 505 | static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, |
@@ -623,7 +521,7 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, | |||
623 | } | 521 | } |
624 | } | 522 | } |
625 | 523 | ||
626 | static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, | 524 | static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, |
627 | struct pd_uinfo *pd_uinfo, | 525 | struct pd_uinfo *pd_uinfo, |
628 | struct ce_pd *pd) | 526 | struct ce_pd *pd) |
629 | { | 527 | { |
@@ -644,13 +542,13 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, | |||
644 | dst->offset, dst->length, DMA_FROM_DEVICE); | 542 | dst->offset, dst->length, DMA_FROM_DEVICE); |
645 | } | 543 | } |
646 | crypto4xx_ret_sg_desc(dev, pd_uinfo); | 544 | crypto4xx_ret_sg_desc(dev, pd_uinfo); |
647 | if (ablk_req->base.complete != NULL) | ||
648 | ablk_req->base.complete(&ablk_req->base, 0); | ||
649 | 545 | ||
650 | return 0; | 546 | if (pd_uinfo->state & PD_ENTRY_BUSY) |
547 | ablkcipher_request_complete(ablk_req, -EINPROGRESS); | ||
548 | ablkcipher_request_complete(ablk_req, 0); | ||
651 | } | 549 | } |
652 | 550 | ||
653 | static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev, | 551 | static void crypto4xx_ahash_done(struct crypto4xx_device *dev, |
654 | struct pd_uinfo *pd_uinfo) | 552 | struct pd_uinfo *pd_uinfo) |
655 | { | 553 | { |
656 | struct crypto4xx_ctx *ctx; | 554 | struct crypto4xx_ctx *ctx; |
@@ -659,62 +557,93 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev, | |||
659 | ahash_req = ahash_request_cast(pd_uinfo->async_req); | 557 | ahash_req = ahash_request_cast(pd_uinfo->async_req); |
660 | ctx = crypto_tfm_ctx(ahash_req->base.tfm); | 558 | ctx = crypto_tfm_ctx(ahash_req->base.tfm); |
661 | 559 | ||
662 | crypto4xx_copy_digest_to_dst(pd_uinfo, | 560 | crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, |
663 | crypto_tfm_ctx(ahash_req->base.tfm)); | 561 | crypto_tfm_ctx(ahash_req->base.tfm)); |
664 | crypto4xx_ret_sg_desc(dev, pd_uinfo); | 562 | crypto4xx_ret_sg_desc(dev, pd_uinfo); |
665 | /* call user provided callback function x */ | ||
666 | if (ahash_req->base.complete != NULL) | ||
667 | ahash_req->base.complete(&ahash_req->base, 0); | ||
668 | 563 | ||
669 | return 0; | 564 | if (pd_uinfo->state & PD_ENTRY_BUSY) |
565 | ahash_request_complete(ahash_req, -EINPROGRESS); | ||
566 | ahash_request_complete(ahash_req, 0); | ||
670 | } | 567 | } |
671 | 568 | ||
672 | static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) | 569 | static void crypto4xx_aead_done(struct crypto4xx_device *dev, |
570 | struct pd_uinfo *pd_uinfo, | ||
571 | struct ce_pd *pd) | ||
673 | { | 572 | { |
674 | struct ce_pd *pd; | 573 | struct aead_request *aead_req; |
675 | struct pd_uinfo *pd_uinfo; | 574 | struct crypto4xx_ctx *ctx; |
575 | struct scatterlist *dst = pd_uinfo->dest_va; | ||
576 | int err = 0; | ||
676 | 577 | ||
677 | pd = dev->pdr + sizeof(struct ce_pd)*idx; | 578 | aead_req = container_of(pd_uinfo->async_req, struct aead_request, |
678 | pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx; | 579 | base); |
679 | if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) == | 580 | ctx = crypto_tfm_ctx(aead_req->base.tfm); |
680 | CRYPTO_ALG_TYPE_ABLKCIPHER) | 581 | |
681 | return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); | 582 | if (pd_uinfo->using_sd) { |
682 | else | 583 | crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, |
683 | return crypto4xx_ahash_done(dev, pd_uinfo); | 584 | pd->pd_ctl_len.bf.pkt_len, |
585 | dst); | ||
586 | } else { | ||
587 | __dma_sync_page(sg_page(dst), dst->offset, dst->length, | ||
588 | DMA_FROM_DEVICE); | ||
589 | } | ||
590 | |||
591 | if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { | ||
592 | /* append icv at the end */ | ||
593 | size_t cp_len = crypto_aead_authsize( | ||
594 | crypto_aead_reqtfm(aead_req)); | ||
595 | u32 icv[cp_len]; | ||
596 | |||
597 | crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, | ||
598 | cp_len); | ||
599 | |||
600 | scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, | ||
601 | cp_len, 1); | ||
602 | } | ||
603 | |||
604 | crypto4xx_ret_sg_desc(dev, pd_uinfo); | ||
605 | |||
606 | if (pd->pd_ctl.bf.status & 0xff) { | ||
607 | if (pd->pd_ctl.bf.status & 0x1) { | ||
608 | /* authentication error */ | ||
609 | err = -EBADMSG; | ||
610 | } else { | ||
611 | if (!__ratelimit(&dev->aead_ratelimit)) { | ||
612 | if (pd->pd_ctl.bf.status & 2) | ||
613 | pr_err("pad fail error\n"); | ||
614 | if (pd->pd_ctl.bf.status & 4) | ||
615 | pr_err("seqnum fail\n"); | ||
616 | if (pd->pd_ctl.bf.status & 8) | ||
617 | pr_err("error _notify\n"); | ||
618 | pr_err("aead return err status = 0x%02x\n", | ||
619 | pd->pd_ctl.bf.status & 0xff); | ||
620 | pr_err("pd pad_ctl = 0x%08x\n", | ||
621 | pd->pd_ctl.bf.pd_pad_ctl); | ||
622 | } | ||
623 | err = -EINVAL; | ||
624 | } | ||
625 | } | ||
626 | |||
627 | if (pd_uinfo->state & PD_ENTRY_BUSY) | ||
628 | aead_request_complete(aead_req, -EINPROGRESS); | ||
629 | |||
630 | aead_request_complete(aead_req, err); | ||
684 | } | 631 | } |
685 | 632 | ||
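The status handling above maps the packet descriptor's error bits onto errno values: bit 0 is an authentication (ICV) mismatch and becomes -EBADMSG, while pad (bit 1), sequence-number (bit 2), and notify (bit 3) failures are rate-limit-logged and collapse to -EINVAL. A compact restatement of that mapping (illustrative helper, not driver code):

    #include <errno.h>

    /* Illustrative: errno mapping for the PD status bits tested above. */
    static int pd_status_to_errno(unsigned int status)
    {
            if (!(status & 0xff))
                    return 0;        /* no error flagged */
            if (status & 0x1)
                    return -EBADMSG; /* ICV/authentication mismatch */
            return -EINVAL;          /* pad (0x2), seqnum (0x4), notify (0x8) */
    }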
686 | /** | 633 | static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) |
687 | * Note: Only use this function to copy items that is word aligned. | ||
688 | */ | ||
689 | void crypto4xx_memcpy_le(unsigned int *dst, | ||
690 | const unsigned char *buf, | ||
691 | int len) | ||
692 | { | 634 | { |
693 | u8 *tmp; | 635 | struct ce_pd *pd = &dev->pdr[idx]; |
694 | for (; len >= 4; buf += 4, len -= 4) | 636 | struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx]; |
695 | *dst++ = cpu_to_le32(*(unsigned int *) buf); | 637 | |
696 | 638 | switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) { | |
697 | tmp = (u8 *)dst; | 639 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
698 | switch (len) { | 640 | crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); |
699 | case 3: | ||
700 | *tmp++ = 0; | ||
701 | *tmp++ = *(buf+2); | ||
702 | *tmp++ = *(buf+1); | ||
703 | *tmp++ = *buf; | ||
704 | break; | ||
705 | case 2: | ||
706 | *tmp++ = 0; | ||
707 | *tmp++ = 0; | ||
708 | *tmp++ = *(buf+1); | ||
709 | *tmp++ = *buf; | ||
710 | break; | 641 | break; |
711 | case 1: | 642 | case CRYPTO_ALG_TYPE_AEAD: |
712 | *tmp++ = 0; | 643 | crypto4xx_aead_done(dev, pd_uinfo, pd); |
713 | *tmp++ = 0; | ||
714 | *tmp++ = 0; | ||
715 | *tmp++ = *buf; | ||
716 | break; | 644 | break; |
717 | default: | 645 | case CRYPTO_ALG_TYPE_AHASH: |
646 | crypto4xx_ahash_done(dev, pd_uinfo); | ||
718 | break; | 647 | break; |
719 | } | 648 | } |
720 | } | 649 | } |
@@ -729,17 +658,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev) | |||
729 | kfree(core_dev); | 658 | kfree(core_dev); |
730 | } | 659 | } |
731 | 660 | ||
732 | void crypto4xx_return_pd(struct crypto4xx_device *dev, | ||
733 | u32 pd_entry, struct ce_pd *pd, | ||
734 | struct pd_uinfo *pd_uinfo) | ||
735 | { | ||
736 | /* irq should be already disabled */ | ||
737 | dev->pdr_head = pd_entry; | ||
738 | pd->pd_ctl.w = 0; | ||
739 | pd->pd_ctl_len.w = 0; | ||
740 | pd_uinfo->state = PD_ENTRY_FREE; | ||
741 | } | ||
742 | |||
743 | static u32 get_next_gd(u32 current) | 661 | static u32 get_next_gd(u32 current) |
744 | { | 662 | { |
745 | if (current != PPC4XX_LAST_GD) | 663 | if (current != PPC4XX_LAST_GD) |
@@ -756,17 +674,19 @@ static u32 get_next_sd(u32 current) | |||
756 | return 0; | 674 | return 0; |
757 | } | 675 | } |
758 | 676 | ||
759 | u32 crypto4xx_build_pd(struct crypto_async_request *req, | 677 | int crypto4xx_build_pd(struct crypto_async_request *req, |
760 | struct crypto4xx_ctx *ctx, | 678 | struct crypto4xx_ctx *ctx, |
761 | struct scatterlist *src, | 679 | struct scatterlist *src, |
762 | struct scatterlist *dst, | 680 | struct scatterlist *dst, |
763 | unsigned int datalen, | 681 | const unsigned int datalen, |
764 | void *iv, u32 iv_len) | 682 | const __le32 *iv, const u32 iv_len, |
683 | const struct dynamic_sa_ctl *req_sa, | ||
684 | const unsigned int sa_len, | ||
685 | const unsigned int assoclen) | ||
765 | { | 686 | { |
687 | struct scatterlist _dst[2]; | ||
766 | struct crypto4xx_device *dev = ctx->dev; | 688 | struct crypto4xx_device *dev = ctx->dev; |
767 | dma_addr_t addr, pd_dma, sd_dma, gd_dma; | ||
768 | struct dynamic_sa_ctl *sa; | 689 | struct dynamic_sa_ctl *sa; |
769 | struct scatterlist *sg; | ||
770 | struct ce_gd *gd; | 690 | struct ce_gd *gd; |
771 | struct ce_pd *pd; | 691 | struct ce_pd *pd; |
772 | u32 num_gd, num_sd; | 692 | u32 num_gd, num_sd; |
@@ -774,22 +694,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
774 | u32 fst_sd = 0xffffffff; | 694 | u32 fst_sd = 0xffffffff; |
775 | u32 pd_entry; | 695 | u32 pd_entry; |
776 | unsigned long flags; | 696 | unsigned long flags; |
777 | struct pd_uinfo *pd_uinfo = NULL; | 697 | struct pd_uinfo *pd_uinfo; |
778 | unsigned int nbytes = datalen, idx; | 698 | unsigned int nbytes = datalen; |
779 | unsigned int ivlen = 0; | 699 | size_t offset_to_sr_ptr; |
780 | u32 gd_idx = 0; | 700 | u32 gd_idx = 0; |
701 | int tmp; | ||
702 | bool is_busy; | ||
781 | 703 | ||
782 | /* figure how many gd is needed */ | 704 | /* figure how many gd are needed */ |
783 | num_gd = sg_nents_for_len(src, datalen); | 705 | tmp = sg_nents_for_len(src, assoclen + datalen); |
784 | if ((int)num_gd < 0) { | 706 | if (tmp < 0) { |
785 | dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); | 707 | dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); |
786 | return -EINVAL; | 708 | return tmp; |
787 | } | 709 | } |
788 | if (num_gd == 1) | 710 | if (tmp == 1) |
789 | num_gd = 0; | 711 | tmp = 0; |
712 | num_gd = tmp; | ||
790 | 713 | ||
791 | /* figure how many sd is needed */ | 714 | if (assoclen) { |
792 | if (sg_is_last(dst) || ctx->is_hash) { | 715 | nbytes += assoclen; |
716 | dst = scatterwalk_ffwd(_dst, dst, assoclen); | ||
717 | } | ||
718 | |||
719 | /* figure how many sd are needed */ | ||
720 | if (sg_is_last(dst)) { | ||
793 | num_sd = 0; | 721 | num_sd = 0; |
794 | } else { | 722 | } else { |
795 | if (datalen > PPC4XX_SD_BUFFER_SIZE) { | 723 | if (datalen > PPC4XX_SD_BUFFER_SIZE) { |
@@ -808,6 +736,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
808 | * already got must be returned to the original place. | 736 |
809 | */ | 737 | */ |
810 | spin_lock_irqsave(&dev->core_dev->lock, flags); | 738 | spin_lock_irqsave(&dev->core_dev->lock, flags); |
739 | /* | ||
740 | * Let the caller know to slow down once more than 13/16ths = 81% | ||
741 | * of the available data contexts are being used simultaneously. | ||
742 | * | ||
743 | * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for | ||
744 | * 31 more contexts before new requests have to be rejected. | ||
745 | */ | ||
746 | if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { | ||
747 | is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >= | ||
748 | ((PPC4XX_NUM_PD * 13) / 16); | ||
749 | } else { | ||
750 | /* | ||
751 | * To fix contention issues between ipsec (no backlog) and | ||
752 | * dm-crypt (backlog), reserve 32 entries for "no backlog" | ||
753 | * data contexts. | ||
754 | */ | ||
755 | is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >= | ||
756 | ((PPC4XX_NUM_PD * 15) / 16); | ||
757 | |||
758 | if (is_busy) { | ||
759 | spin_unlock_irqrestore(&dev->core_dev->lock, flags); | ||
760 | return -EBUSY; | ||
761 | } | ||
762 | } | ||
763 | |||
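Concretely, with PPC4XX_NUM_PD = 256 the two thresholds above come out to 208 (13/16) in-flight descriptors before MAY_BACKLOG requests start being flagged busy (-EBUSY with the request still queued), and 240 (15/16) before requests without CRYPTO_TFM_REQ_MAY_BACKLOG are refused outright; the 32-entry gap between the two is the reservation the comment above describes. A quick check of that arithmetic:

    /* Illustrative arithmetic for the 13/16 and 15/16 thresholds above. */
    #define PPC4XX_NUM_PD 256

    int main(void)
    {
            unsigned int soft = (PPC4XX_NUM_PD * 13) / 16; /* 208: begin backpressure */
            unsigned int hard = (PPC4XX_NUM_PD * 15) / 16; /* 240: reject non-backlog */

            return (hard - soft == 32) ? 0 : 1; /* the 32-entry gap described above */
    }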
811 | if (num_gd) { | 764 | if (num_gd) { |
812 | fst_gd = crypto4xx_get_n_gd(dev, num_gd); | 765 | fst_gd = crypto4xx_get_n_gd(dev, num_gd); |
813 | if (fst_gd == ERING_WAS_FULL) { | 766 | if (fst_gd == ERING_WAS_FULL) { |
@@ -835,38 +788,28 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
835 | } | 788 | } |
836 | spin_unlock_irqrestore(&dev->core_dev->lock, flags); | 789 | spin_unlock_irqrestore(&dev->core_dev->lock, flags); |
837 | 790 | ||
838 | pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + | 791 | pd = &dev->pdr[pd_entry]; |
839 | sizeof(struct pd_uinfo) * pd_entry); | 792 | pd->sa_len = sa_len; |
840 | pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry); | 793 | |
794 | pd_uinfo = &dev->pdr_uinfo[pd_entry]; | ||
841 | pd_uinfo->async_req = req; | 795 | pd_uinfo->async_req = req; |
842 | pd_uinfo->num_gd = num_gd; | 796 | pd_uinfo->num_gd = num_gd; |
843 | pd_uinfo->num_sd = num_sd; | 797 | pd_uinfo->num_sd = num_sd; |
844 | 798 | ||
845 | if (iv_len || ctx->is_hash) { | 799 | if (iv_len) |
846 | ivlen = iv_len; | 800 | memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len); |
847 | pd->sa = pd_uinfo->sa_pa; | ||
848 | sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va; | ||
849 | if (ctx->direction == DIR_INBOUND) | ||
850 | memcpy(sa, ctx->sa_in, ctx->sa_len * 4); | ||
851 | else | ||
852 | memcpy(sa, ctx->sa_out, ctx->sa_len * 4); | ||
853 | 801 | ||
854 | memcpy((void *) sa + ctx->offset_to_sr_ptr, | 802 | sa = pd_uinfo->sa_va; |
855 | &pd_uinfo->sr_pa, 4); | 803 | memcpy(sa, req_sa, sa_len * 4); |
804 | |||
805 | sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2); | ||
806 | offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa); | ||
807 | *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa; | ||
856 | 808 | ||
857 | if (iv_len) | ||
858 | crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len); | ||
859 | } else { | ||
860 | if (ctx->direction == DIR_INBOUND) { | ||
861 | pd->sa = ctx->sa_in_dma_addr; | ||
862 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | ||
863 | } else { | ||
864 | pd->sa = ctx->sa_out_dma_addr; | ||
865 | sa = (struct dynamic_sa_ctl *) ctx->sa_out; | ||
866 | } | ||
867 | } | ||
868 | pd->sa_len = ctx->sa_len; | ||
869 | if (num_gd) { | 809 | if (num_gd) { |
810 | dma_addr_t gd_dma; | ||
811 | struct scatterlist *sg; | ||
812 | |||
870 | /* get first gd we are going to use */ | 813 | /* get first gd we are going to use */ |
871 | gd_idx = fst_gd; | 814 | gd_idx = fst_gd; |
872 | pd_uinfo->first_gd = fst_gd; | 815 | pd_uinfo->first_gd = fst_gd; |
@@ -875,27 +818,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
875 | pd->src = gd_dma; | 818 | pd->src = gd_dma; |
876 | /* enable gather */ | 819 | /* enable gather */ |
877 | sa->sa_command_0.bf.gather = 1; | 820 | sa->sa_command_0.bf.gather = 1; |
878 | idx = 0; | ||
879 | src = &src[0]; | ||
880 | /* walk the sg, and setup gather array */ | 821 | /* walk the sg, and setup gather array */ |
822 | |||
823 | sg = src; | ||
881 | while (nbytes) { | 824 | while (nbytes) { |
882 | sg = &src[idx]; | 825 | size_t len; |
883 | addr = dma_map_page(dev->core_dev->device, sg_page(sg), | 826 | |
884 | sg->offset, sg->length, DMA_TO_DEVICE); | 827 | len = min(sg->length, nbytes); |
885 | gd->ptr = addr; | 828 | gd->ptr = dma_map_page(dev->core_dev->device, |
886 | gd->ctl_len.len = sg->length; | 829 | sg_page(sg), sg->offset, len, DMA_TO_DEVICE); |
830 | gd->ctl_len.len = len; | ||
887 | gd->ctl_len.done = 0; | 831 | gd->ctl_len.done = 0; |
888 | gd->ctl_len.ready = 1; | 832 | gd->ctl_len.ready = 1; |
889 | if (sg->length >= nbytes) | 833 | if (len >= nbytes) |
890 | break; | 834 | break; |
835 | |||
891 | nbytes -= sg->length; | 836 | nbytes -= sg->length; |
892 | gd_idx = get_next_gd(gd_idx); | 837 | gd_idx = get_next_gd(gd_idx); |
893 | gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); | 838 | gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); |
894 | idx++; | 839 | sg = sg_next(sg); |
895 | } | 840 | } |
896 | } else { | 841 | } else { |
897 | pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), | 842 | pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), |
898 | src->offset, src->length, DMA_TO_DEVICE); | 843 | src->offset, min(nbytes, src->length), |
844 | DMA_TO_DEVICE); | ||
899 | /* | 845 | /* |
900 | * Disable gather in sa command | 846 | * Disable gather in sa command |
901 | */ | 847 | */ |
@@ -906,25 +852,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
906 | pd_uinfo->first_gd = 0xffffffff; | 852 | pd_uinfo->first_gd = 0xffffffff; |
907 | pd_uinfo->num_gd = 0; | 853 | pd_uinfo->num_gd = 0; |
908 | } | 854 | } |
909 | if (ctx->is_hash || sg_is_last(dst)) { | 855 | if (sg_is_last(dst)) { |
910 | /* | 856 | /* |
911 | * we know the application gives us dst as one whole piece of memory, | 857 | * we know the application gives us dst as one whole piece of memory,
912 | * so there is no need to use the scatter ring. | 858 | * so there is no need to use the scatter ring.
913 | * In case of is_hash, the icv is always at end of src data. | ||
914 | */ | 859 | */ |
915 | pd_uinfo->using_sd = 0; | 860 | pd_uinfo->using_sd = 0; |
916 | pd_uinfo->first_sd = 0xffffffff; | 861 | pd_uinfo->first_sd = 0xffffffff; |
917 | pd_uinfo->num_sd = 0; | 862 | pd_uinfo->num_sd = 0; |
918 | pd_uinfo->dest_va = dst; | 863 | pd_uinfo->dest_va = dst; |
919 | sa->sa_command_0.bf.scatter = 0; | 864 | sa->sa_command_0.bf.scatter = 0; |
920 | if (ctx->is_hash) | 865 | pd->dest = (u32)dma_map_page(dev->core_dev->device, |
921 | pd->dest = virt_to_phys((void *)dst); | 866 | sg_page(dst), dst->offset, |
922 | else | 867 | min(datalen, dst->length), |
923 | pd->dest = (u32)dma_map_page(dev->core_dev->device, | 868 | DMA_TO_DEVICE); |
924 | sg_page(dst), dst->offset, | ||
925 | dst->length, DMA_TO_DEVICE); | ||
926 | } else { | 869 | } else { |
870 | dma_addr_t sd_dma; | ||
927 | struct ce_sd *sd = NULL; | 871 | struct ce_sd *sd = NULL; |
872 | |||
928 | u32 sd_idx = fst_sd; | 873 | u32 sd_idx = fst_sd; |
929 | nbytes = datalen; | 874 | nbytes = datalen; |
930 | sa->sa_command_0.bf.scatter = 1; | 875 | sa->sa_command_0.bf.scatter = 1; |
@@ -938,7 +883,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
938 | sd->ctl.done = 0; | 883 | sd->ctl.done = 0; |
939 | sd->ctl.rdy = 1; | 884 | sd->ctl.rdy = 1; |
940 | /* sd->ptr should be setup by sd_init routine*/ | 885 | /* sd->ptr should be setup by sd_init routine*/ |
941 | idx = 0; | ||
942 | if (nbytes >= PPC4XX_SD_BUFFER_SIZE) | 886 | if (nbytes >= PPC4XX_SD_BUFFER_SIZE) |
943 | nbytes -= PPC4XX_SD_BUFFER_SIZE; | 887 | nbytes -= PPC4XX_SD_BUFFER_SIZE; |
944 | else | 888 | else |
@@ -949,67 +893,97 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
949 | /* setup scatter descriptor */ | 893 | /* setup scatter descriptor */ |
950 | sd->ctl.done = 0; | 894 | sd->ctl.done = 0; |
951 | sd->ctl.rdy = 1; | 895 | sd->ctl.rdy = 1; |
952 | if (nbytes >= PPC4XX_SD_BUFFER_SIZE) | 896 | if (nbytes >= PPC4XX_SD_BUFFER_SIZE) { |
953 | nbytes -= PPC4XX_SD_BUFFER_SIZE; | 897 | nbytes -= PPC4XX_SD_BUFFER_SIZE; |
954 | else | 898 | } else { |
955 | /* | 899 | /* |
956 | * SD entry can hold PPC4XX_SD_BUFFER_SIZE, | 900 | * SD entry can hold PPC4XX_SD_BUFFER_SIZE, |
957 | * which is more than nbytes, so done. | 901 | * which is more than nbytes, so done. |
958 | */ | 902 | */ |
959 | nbytes = 0; | 903 | nbytes = 0; |
904 | } | ||
960 | } | 905 | } |
961 | } | 906 | } |
962 | 907 | ||
963 | sa->sa_command_1.bf.hash_crypto_offset = 0; | 908 | pd->pd_ctl.w = PD_CTL_HOST_READY | |
964 | pd->pd_ctl.w = ctx->pd_ctl; | 909 | ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) | |
965 | pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen; | 910 | (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? |
966 | pd_uinfo->state = PD_ENTRY_INUSE; | 911 | PD_CTL_HASH_FINAL : 0); |
912 | pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); | ||
913 | pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0); | ||
914 | |||
967 | wmb(); | 915 | wmb(); |
968 | /* write any value to push engine to read a pd */ | 916 | /* write any value to push engine to read a pd */ |
917 | writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); | ||
969 | writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); | 918 | writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); |
970 | return -EINPROGRESS; | 919 | return is_busy ? -EBUSY : -EINPROGRESS; |
971 | } | 920 | } |
972 | 921 | ||
973 | /** | 922 | /** |
974 | * Algorithm Registration Functions | 923 | * Algorithm Registration Functions |
975 | */ | 924 | */ |
976 | static int crypto4xx_alg_init(struct crypto_tfm *tfm) | 925 | static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg, |
926 | struct crypto4xx_ctx *ctx) | ||
977 | { | 927 | { |
978 | struct crypto_alg *alg = tfm->__crt_alg; | ||
979 | struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg); | ||
980 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
981 | |||
982 | ctx->dev = amcc_alg->dev; | 928 | ctx->dev = amcc_alg->dev; |
983 | ctx->sa_in = NULL; | 929 | ctx->sa_in = NULL; |
984 | ctx->sa_out = NULL; | 930 | ctx->sa_out = NULL; |
985 | ctx->sa_in_dma_addr = 0; | ||
986 | ctx->sa_out_dma_addr = 0; | ||
987 | ctx->sa_len = 0; | 931 | ctx->sa_len = 0; |
932 | } | ||
988 | 933 | ||
989 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 934 | static int crypto4xx_ablk_init(struct crypto_tfm *tfm) |
990 | default: | 935 | { |
991 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); | 936 | struct crypto_alg *alg = tfm->__crt_alg; |
992 | break; | 937 | struct crypto4xx_alg *amcc_alg; |
993 | case CRYPTO_ALG_TYPE_AHASH: | 938 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); |
994 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
995 | sizeof(struct crypto4xx_ctx)); | ||
996 | break; | ||
997 | } | ||
998 | 939 | ||
940 | amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher); | ||
941 | crypto4xx_ctx_init(amcc_alg, ctx); | ||
942 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); | ||
999 | return 0; | 943 | return 0; |
1000 | } | 944 | } |
1001 | 945 | ||
1002 | static void crypto4xx_alg_exit(struct crypto_tfm *tfm) | 946 | static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx) |
1003 | { | 947 | { |
1004 | struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1005 | |||
1006 | crypto4xx_free_sa(ctx); | 948 | crypto4xx_free_sa(ctx); |
1007 | crypto4xx_free_state_record(ctx); | ||
1008 | } | 949 | } |
1009 | 950 | ||
1010 | int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | 951 | static void crypto4xx_ablk_exit(struct crypto_tfm *tfm) |
1011 | struct crypto4xx_alg_common *crypto_alg, | 952 | { |
1012 | int array_size) | 953 | crypto4xx_common_exit(crypto_tfm_ctx(tfm)); |
954 | } | ||
955 | |||
956 | static int crypto4xx_aead_init(struct crypto_aead *tfm) | ||
957 | { | ||
958 | struct aead_alg *alg = crypto_aead_alg(tfm); | ||
959 | struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm); | ||
960 | struct crypto4xx_alg *amcc_alg; | ||
961 | |||
962 | ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0, | ||
963 | CRYPTO_ALG_NEED_FALLBACK | | ||
964 | CRYPTO_ALG_ASYNC); | ||
965 | if (IS_ERR(ctx->sw_cipher.aead)) | ||
966 | return PTR_ERR(ctx->sw_cipher.aead); | ||
967 | |||
968 | amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead); | ||
969 | crypto4xx_ctx_init(amcc_alg, ctx); | ||
970 | crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + | ||
971 | max(sizeof(struct crypto4xx_ctx), 32 + | ||
972 | crypto_aead_reqsize(ctx->sw_cipher.aead))); | ||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | static void crypto4xx_aead_exit(struct crypto_aead *tfm) | ||
977 | { | ||
978 | struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm); | ||
979 | |||
980 | crypto4xx_common_exit(ctx); | ||
981 | crypto_free_aead(ctx->sw_cipher.aead); | ||
982 | } | ||
983 | |||
984 | static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | ||
985 | struct crypto4xx_alg_common *crypto_alg, | ||
986 | int array_size) | ||
1013 | { | 987 | { |
1014 | struct crypto4xx_alg *alg; | 988 | struct crypto4xx_alg *alg; |
1015 | int i; | 989 | int i; |
@@ -1024,6 +998,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | |||
1024 | alg->dev = sec_dev; | 998 | alg->dev = sec_dev; |
1025 | 999 | ||
1026 | switch (alg->alg.type) { | 1000 | switch (alg->alg.type) { |
1001 | case CRYPTO_ALG_TYPE_AEAD: | ||
1002 | rc = crypto_register_aead(&alg->alg.u.aead); | ||
1003 | break; | ||
1004 | |||
1027 | case CRYPTO_ALG_TYPE_AHASH: | 1005 | case CRYPTO_ALG_TYPE_AHASH: |
1028 | rc = crypto_register_ahash(&alg->alg.u.hash); | 1006 | rc = crypto_register_ahash(&alg->alg.u.hash); |
1029 | break; | 1007 | break; |
@@ -1033,12 +1011,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | |||
1033 | break; | 1011 | break; |
1034 | } | 1012 | } |
1035 | 1013 | ||
1036 | if (rc) { | 1014 | if (rc) |
1037 | list_del(&alg->entry); | ||
1038 | kfree(alg); | 1015 | kfree(alg); |
1039 | } else { | 1016 | else |
1040 | list_add_tail(&alg->entry, &sec_dev->alg_list); | 1017 | list_add_tail(&alg->entry, &sec_dev->alg_list); |
1041 | } | ||
1042 | } | 1018 | } |
1043 | 1019 | ||
1044 | return 0; | 1020 | return 0; |
@@ -1055,6 +1031,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) | |||
1055 | crypto_unregister_ahash(&alg->alg.u.hash); | 1031 | crypto_unregister_ahash(&alg->alg.u.hash); |
1056 | break; | 1032 | break; |
1057 | 1033 | ||
1034 | case CRYPTO_ALG_TYPE_AEAD: | ||
1035 | crypto_unregister_aead(&alg->alg.u.aead); | ||
1036 | break; | ||
1037 | |||
1058 | default: | 1038 | default: |
1059 | crypto_unregister_alg(&alg->alg.u.cipher); | 1039 | crypto_unregister_alg(&alg->alg.u.cipher); |
1060 | } | 1040 | } |
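The switch above sits inside crypto4xx_unregister_alg()'s teardown walk; the loop itself falls outside the hunk. A hedged reconstruction of its shape:

    /* Reconstruction, not shown in this hunk: entries were
     * list_add_tail()ed at registration time, so teardown uses the
     * _safe iterator because each node is freed as it is visited. */
    struct crypto4xx_alg *alg, *tmp;

    list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
            list_del(&alg->entry);
            /* type-dispatched crypto_unregister_*(), as in the switch */
            kfree(alg);
    }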
@@ -1068,25 +1048,23 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data) | |||
1068 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); | 1048 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); |
1069 | struct pd_uinfo *pd_uinfo; | 1049 | struct pd_uinfo *pd_uinfo; |
1070 | struct ce_pd *pd; | 1050 | struct ce_pd *pd; |
1071 | u32 tail; | 1051 | u32 tail = core_dev->dev->pdr_tail; |
1072 | 1052 | u32 head = core_dev->dev->pdr_head; | |
1073 | while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) { | 1053 | |
1074 | tail = core_dev->dev->pdr_tail; | 1054 | do { |
1075 | pd_uinfo = core_dev->dev->pdr_uinfo + | 1055 | pd_uinfo = &core_dev->dev->pdr_uinfo[tail]; |
1076 | sizeof(struct pd_uinfo)*tail; | 1056 | pd = &core_dev->dev->pdr[tail]; |
1077 | pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail; | 1057 | if ((pd_uinfo->state & PD_ENTRY_INUSE) && |
1078 | if ((pd_uinfo->state == PD_ENTRY_INUSE) && | 1058 | ((READ_ONCE(pd->pd_ctl.w) & |
1079 | pd->pd_ctl.bf.pe_done && | 1059 | (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == |
1080 | !pd->pd_ctl.bf.host_ready) { | 1060 | PD_CTL_PE_DONE)) { |
1081 | pd->pd_ctl.bf.pe_done = 0; | ||
1082 | crypto4xx_pd_done(core_dev->dev, tail); | 1061 | crypto4xx_pd_done(core_dev->dev, tail); |
1083 | crypto4xx_put_pd_to_pdr(core_dev->dev, tail); | 1062 | tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail); |
1084 | pd_uinfo->state = PD_ENTRY_FREE; | ||
1085 | } else { | 1063 | } else { |
1086 | /* if tail not done, break */ | 1064 | /* if tail not done, break */ |
1087 | break; | 1065 | break; |
1088 | } | 1066 | } |
1089 | } | 1067 | } while (head != tail); |
1090 | } | 1068 | } |
1091 | 1069 | ||
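The rewritten bottom half now walks from pdr_tail toward pdr_head, reaping every finished descriptor in one pass. The completion test is the subtle part; pulled out as a predicate (hypothetical helper name, bits from the crypto4xx_reg_def.h hunk further down):

    /* A descriptor is done once the engine has set PE_DONE and consumed
     * (cleared) HOST_READY. READ_ONCE() forces a single load of the
     * DMA-visible control word so both bits come from one snapshot. */
    static inline bool crypto4xx_pd_is_done(const struct ce_pd *pd)
    {
            u32 ctl = READ_ONCE(pd->pd_ctl.w);

            return (ctl & (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
                    PD_CTL_PE_DONE;
    }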
1092 | /** | 1070 | /** |
@@ -1110,18 +1088,20 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | |||
1110 | /** | 1088 | /** |
1111 | * Supported Crypto Algorithms | 1089 | * Supported Crypto Algorithms |
1112 | */ | 1090 | */ |
1113 | struct crypto4xx_alg_common crypto4xx_alg[] = { | 1091 | static struct crypto4xx_alg_common crypto4xx_alg[] = { |
1114 | /* Crypto AES modes */ | 1092 | /* Crypto AES modes */ |
1115 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { | 1093 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { |
1116 | .cra_name = "cbc(aes)", | 1094 | .cra_name = "cbc(aes)", |
1117 | .cra_driver_name = "cbc-aes-ppc4xx", | 1095 | .cra_driver_name = "cbc-aes-ppc4xx", |
1118 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1096 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1119 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1097 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1098 | CRYPTO_ALG_ASYNC | | ||
1099 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1120 | .cra_blocksize = AES_BLOCK_SIZE, | 1100 | .cra_blocksize = AES_BLOCK_SIZE, |
1121 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1101 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1122 | .cra_type = &crypto_ablkcipher_type, | 1102 | .cra_type = &crypto_ablkcipher_type, |
1123 | .cra_init = crypto4xx_alg_init, | 1103 | .cra_init = crypto4xx_ablk_init, |
1124 | .cra_exit = crypto4xx_alg_exit, | 1104 | .cra_exit = crypto4xx_ablk_exit, |
1125 | .cra_module = THIS_MODULE, | 1105 | .cra_module = THIS_MODULE, |
1126 | .cra_u = { | 1106 | .cra_u = { |
1127 | .ablkcipher = { | 1107 | .ablkcipher = { |
@@ -1134,6 +1114,147 @@ struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1134 | } | 1114 | } |
1135 | } | 1115 | } |
1136 | }}, | 1116 | }}, |
1117 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { | ||
1118 | .cra_name = "cfb(aes)", | ||
1119 | .cra_driver_name = "cfb-aes-ppc4xx", | ||
1120 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1121 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1122 | CRYPTO_ALG_ASYNC | | ||
1123 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1124 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1125 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1126 | .cra_type = &crypto_ablkcipher_type, | ||
1127 | .cra_init = crypto4xx_ablk_init, | ||
1128 | .cra_exit = crypto4xx_ablk_exit, | ||
1129 | .cra_module = THIS_MODULE, | ||
1130 | .cra_u = { | ||
1131 | .ablkcipher = { | ||
1132 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1133 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1134 | .ivsize = AES_IV_SIZE, | ||
1135 | .setkey = crypto4xx_setkey_aes_cfb, | ||
1136 | .encrypt = crypto4xx_encrypt, | ||
1137 | .decrypt = crypto4xx_decrypt, | ||
1138 | } | ||
1139 | } | ||
1140 | } }, | ||
1141 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { | ||
1142 | .cra_name = "rfc3686(ctr(aes))", | ||
1143 | .cra_driver_name = "rfc3686-ctr-aes-ppc4xx", | ||
1144 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1145 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1146 | CRYPTO_ALG_ASYNC | | ||
1147 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1148 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1149 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1150 | .cra_type = &crypto_ablkcipher_type, | ||
1151 | .cra_init = crypto4xx_ablk_init, | ||
1152 | .cra_exit = crypto4xx_ablk_exit, | ||
1153 | .cra_module = THIS_MODULE, | ||
1154 | .cra_u = { | ||
1155 | .ablkcipher = { | ||
1156 | .min_keysize = AES_MIN_KEY_SIZE + | ||
1157 | CTR_RFC3686_NONCE_SIZE, | ||
1158 | .max_keysize = AES_MAX_KEY_SIZE + | ||
1159 | CTR_RFC3686_NONCE_SIZE, | ||
1160 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1161 | .setkey = crypto4xx_setkey_rfc3686, | ||
1162 | .encrypt = crypto4xx_rfc3686_encrypt, | ||
1163 | .decrypt = crypto4xx_rfc3686_decrypt, | ||
1164 | } | ||
1165 | } | ||
1166 | } }, | ||
1167 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { | ||
1168 | .cra_name = "ecb(aes)", | ||
1169 | .cra_driver_name = "ecb-aes-ppc4xx", | ||
1170 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1171 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1172 | CRYPTO_ALG_ASYNC | | ||
1173 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1174 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1175 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1176 | .cra_type = &crypto_ablkcipher_type, | ||
1177 | .cra_init = crypto4xx_ablk_init, | ||
1178 | .cra_exit = crypto4xx_ablk_exit, | ||
1179 | .cra_module = THIS_MODULE, | ||
1180 | .cra_u = { | ||
1181 | .ablkcipher = { | ||
1182 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1183 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1184 | .setkey = crypto4xx_setkey_aes_ecb, | ||
1185 | .encrypt = crypto4xx_encrypt, | ||
1186 | .decrypt = crypto4xx_decrypt, | ||
1187 | } | ||
1188 | } | ||
1189 | } }, | ||
1190 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { | ||
1191 | .cra_name = "ofb(aes)", | ||
1192 | .cra_driver_name = "ofb-aes-ppc4xx", | ||
1193 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1194 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1195 | CRYPTO_ALG_ASYNC | | ||
1196 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1197 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1198 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1199 | .cra_type = &crypto_ablkcipher_type, | ||
1200 | .cra_init = crypto4xx_ablk_init, | ||
1201 | .cra_exit = crypto4xx_ablk_exit, | ||
1202 | .cra_module = THIS_MODULE, | ||
1203 | .cra_u = { | ||
1204 | .ablkcipher = { | ||
1205 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1206 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1207 | .ivsize = AES_IV_SIZE, | ||
1208 | .setkey = crypto4xx_setkey_aes_ofb, | ||
1209 | .encrypt = crypto4xx_encrypt, | ||
1210 | .decrypt = crypto4xx_decrypt, | ||
1211 | } | ||
1212 | } | ||
1213 | } }, | ||
1214 | |||
1215 | /* AEAD */ | ||
1216 | { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = { | ||
1217 | .setkey = crypto4xx_setkey_aes_ccm, | ||
1218 | .setauthsize = crypto4xx_setauthsize_aead, | ||
1219 | .encrypt = crypto4xx_encrypt_aes_ccm, | ||
1220 | .decrypt = crypto4xx_decrypt_aes_ccm, | ||
1221 | .init = crypto4xx_aead_init, | ||
1222 | .exit = crypto4xx_aead_exit, | ||
1223 | .ivsize = AES_BLOCK_SIZE, | ||
1224 | .maxauthsize = 16, | ||
1225 | .base = { | ||
1226 | .cra_name = "ccm(aes)", | ||
1227 | .cra_driver_name = "ccm-aes-ppc4xx", | ||
1228 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1229 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1230 | CRYPTO_ALG_NEED_FALLBACK | | ||
1231 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1232 | .cra_blocksize = 1, | ||
1233 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1234 | .cra_module = THIS_MODULE, | ||
1235 | }, | ||
1236 | } }, | ||
1237 | { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = { | ||
1238 | .setkey = crypto4xx_setkey_aes_gcm, | ||
1239 | .setauthsize = crypto4xx_setauthsize_aead, | ||
1240 | .encrypt = crypto4xx_encrypt_aes_gcm, | ||
1241 | .decrypt = crypto4xx_decrypt_aes_gcm, | ||
1242 | .init = crypto4xx_aead_init, | ||
1243 | .exit = crypto4xx_aead_exit, | ||
1244 | .ivsize = GCM_AES_IV_SIZE, | ||
1245 | .maxauthsize = 16, | ||
1246 | .base = { | ||
1247 | .cra_name = "gcm(aes)", | ||
1248 | .cra_driver_name = "gcm-aes-ppc4xx", | ||
1249 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1250 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1251 | CRYPTO_ALG_NEED_FALLBACK | | ||
1252 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1253 | .cra_blocksize = 1, | ||
1254 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1255 | .cra_module = THIS_MODULE, | ||
1256 | }, | ||
1257 | } }, | ||
1137 | }; | 1258 | }; |
1138 | 1259 | ||
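One non-obvious entry in the table above is rfc3686(ctr(aes)): min_keysize and max_keysize grow by CTR_RFC3686_NONCE_SIZE because the setkey blob carries a 4-byte nonce after the AES key, while the per-request IV is only the 8-byte counter portion. A hypothetical helper (illustration, not driver code) showing the split:

    #include <crypto/aes.h>
    #include <crypto/ctr.h> /* CTR_RFC3686_NONCE_SIZE == 4 */

    /* Illustration only: an rfc3686 key blob is AES key || 32-bit nonce;
     * the nonce becomes the first word of every 16-byte counter block. */
    static int rfc3686_split_key(const u8 *key, unsigned int keylen,
                                 unsigned int *aes_keylen, __be32 *nonce)
    {
            if (keylen < AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
                    return -EINVAL;

            *aes_keylen = keylen - CTR_RFC3686_NONCE_SIZE;
            memcpy(nonce, key + *aes_keylen, CTR_RFC3686_NONCE_SIZE);
            return 0;
    }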
1139 | /** | 1260 | /** |
@@ -1187,13 +1308,14 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1187 | core_dev->device = dev; | 1308 | core_dev->device = dev; |
1188 | spin_lock_init(&core_dev->lock); | 1309 | spin_lock_init(&core_dev->lock); |
1189 | INIT_LIST_HEAD(&core_dev->dev->alg_list); | 1310 | INIT_LIST_HEAD(&core_dev->dev->alg_list); |
1311 | ratelimit_default_init(&core_dev->dev->aead_ratelimit); | ||
1190 | rc = crypto4xx_build_pdr(core_dev->dev); | 1312 | rc = crypto4xx_build_pdr(core_dev->dev); |
1191 | if (rc) | 1313 | if (rc) |
1192 | goto err_build_pdr; | 1314 | goto err_build_pdr; |
1193 | 1315 | ||
1194 | rc = crypto4xx_build_gdr(core_dev->dev); | 1316 | rc = crypto4xx_build_gdr(core_dev->dev); |
1195 | if (rc) | 1317 | if (rc) |
1196 | goto err_build_gdr; | 1318 | goto err_build_pdr; |
1197 | 1319 | ||
1198 | rc = crypto4xx_build_sdr(core_dev->dev); | 1320 | rc = crypto4xx_build_sdr(core_dev->dev); |
1199 | if (rc) | 1321 | if (rc) |
@@ -1236,12 +1358,11 @@ err_iomap: | |||
1236 | err_request_irq: | 1358 | err_request_irq: |
1237 | irq_dispose_mapping(core_dev->irq); | 1359 | irq_dispose_mapping(core_dev->irq); |
1238 | tasklet_kill(&core_dev->tasklet); | 1360 | tasklet_kill(&core_dev->tasklet); |
1239 | crypto4xx_destroy_sdr(core_dev->dev); | ||
1240 | err_build_sdr: | 1361 | err_build_sdr: |
1362 | crypto4xx_destroy_sdr(core_dev->dev); | ||
1241 | crypto4xx_destroy_gdr(core_dev->dev); | 1363 | crypto4xx_destroy_gdr(core_dev->dev); |
1242 | err_build_gdr: | ||
1243 | crypto4xx_destroy_pdr(core_dev->dev); | ||
1244 | err_build_pdr: | 1364 | err_build_pdr: |
1365 | crypto4xx_destroy_pdr(core_dev->dev); | ||
1245 | kfree(core_dev->dev); | 1366 | kfree(core_dev->dev); |
1246 | err_alloc_dev: | 1367 | err_alloc_dev: |
1247 | kfree(core_dev); | 1368 | kfree(core_dev); |
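The probe error-path reshuffle above is worth spelling out: after the change, each label tears down the resource built by the step before it, so a failed crypto4xx_build_gdr() jumps straight to err_build_pdr, on the assumption that each destroy routine is safe to call on a partially built (or never started) ring. The idiom, schematically (hypothetical names):

    rc = build_pdr();
    if (rc)
            goto err_build_pdr;     /* destroy_pdr() undoes partial work */
    rc = build_gdr();
    if (rc)
            goto err_build_pdr;     /* gdr cleaned up after itself */
    rc = build_sdr();
    if (rc)
            goto err_build_sdr;
    return 0;

    err_build_sdr:
            destroy_sdr();
            destroy_gdr();
    err_build_pdr:
            destroy_pdr();
            return rc;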
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index ecfdcfe3698d..8ac3bd37203b 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -22,7 +22,11 @@ | |||
22 | #ifndef __CRYPTO4XX_CORE_H__ | 22 | #ifndef __CRYPTO4XX_CORE_H__ |
23 | #define __CRYPTO4XX_CORE_H__ | 23 | #define __CRYPTO4XX_CORE_H__ |
24 | 24 | ||
25 | #include <linux/ratelimit.h> | ||
25 | #include <crypto/internal/hash.h> | 26 | #include <crypto/internal/hash.h> |
27 | #include <crypto/internal/aead.h> | ||
28 | #include "crypto4xx_reg_def.h" | ||
29 | #include "crypto4xx_sa.h" | ||
26 | 30 | ||
27 | #define MODULE_NAME "crypto4xx" | 31 | #define MODULE_NAME "crypto4xx" |
28 | 32 | ||
@@ -34,20 +38,28 @@ | |||
34 | #define PPC405EX_CE_RESET 0x00000008 | 38 | #define PPC405EX_CE_RESET 0x00000008 |
35 | 39 | ||
36 | #define CRYPTO4XX_CRYPTO_PRIORITY 300 | 40 | #define CRYPTO4XX_CRYPTO_PRIORITY 300 |
37 | #define PPC4XX_LAST_PD 63 | 41 | #define PPC4XX_NUM_PD 256 |
38 | #define PPC4XX_NUM_PD 64 | 42 | #define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) |
39 | #define PPC4XX_LAST_GD 1023 | ||
40 | #define PPC4XX_NUM_GD 1024 | 43 | #define PPC4XX_NUM_GD 1024 |
41 | #define PPC4XX_LAST_SD 63 | 44 | #define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) |
42 | #define PPC4XX_NUM_SD 64 | 45 | #define PPC4XX_NUM_SD 256 |
46 | #define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) | ||
43 | #define PPC4XX_SD_BUFFER_SIZE 2048 | 47 | #define PPC4XX_SD_BUFFER_SIZE 2048 |
44 | 48 | ||
45 | #define PD_ENTRY_INUSE 1 | 49 | #define PD_ENTRY_BUSY BIT(1) |
50 | #define PD_ENTRY_INUSE BIT(0) | ||
46 | #define PD_ENTRY_FREE 0 | 51 | #define PD_ENTRY_FREE 0 |
47 | #define ERING_WAS_FULL 0xffffffff | 52 | #define ERING_WAS_FULL 0xffffffff |
48 | 53 | ||
49 | struct crypto4xx_device; | 54 | struct crypto4xx_device; |
50 | 55 | ||
56 | union shadow_sa_buf { | ||
57 | struct dynamic_sa_ctl sa; | ||
58 | |||
59 | /* alloc 256 bytes, which is enough for any kind of dynamic SA */ | ||
60 | u8 buf[256]; | ||
61 | } __packed; | ||
62 | |||
51 | struct pd_uinfo { | 63 | struct pd_uinfo { |
52 | struct crypto4xx_device *dev; | 64 | struct crypto4xx_device *dev; |
53 | u32 state; | 65 | u32 state; |
@@ -60,9 +72,8 @@ struct pd_uinfo { | |||
60 | used by this packet */ | 72 | used by this packet */ |
61 | u32 num_sd; /* number of scatter descriptors | 73 | u32 num_sd; /* number of scatter descriptors |
62 | used by this packet */ | 74 | used by this packet */ |
63 | void *sa_va; /* shadow sa, when using cp from ctx->sa */ | 75 | struct dynamic_sa_ctl *sa_va; /* shadow sa */ |
64 | u32 sa_pa; | 76 | struct sa_state_record *sr_va; /* state record for shadow sa */ |
65 | void *sr_va; /* state record for shadow sa */ | ||
66 | u32 sr_pa; | 77 | u32 sr_pa; |
67 | struct scatterlist *dest_va; | 78 | struct scatterlist *dest_va; |
68 | struct crypto_async_request *async_req; /* base crypto request | 79 | struct crypto_async_request *async_req; /* base crypto request |
@@ -72,27 +83,21 @@ struct pd_uinfo { | |||
72 | struct crypto4xx_device { | 83 | struct crypto4xx_device { |
73 | struct crypto4xx_core_device *core_dev; | 84 | struct crypto4xx_core_device *core_dev; |
74 | char *name; | 85 | char *name; |
75 | u64 ce_phy_address; | ||
76 | void __iomem *ce_base; | 86 | void __iomem *ce_base; |
77 | void __iomem *trng_base; | 87 | void __iomem *trng_base; |
78 | 88 | ||
79 | void *pdr; /* base address of packet | 89 | struct ce_pd *pdr; /* base address of packet descriptor ring */ |
80 | descriptor ring */ | 90 | dma_addr_t pdr_pa; /* physical address of pdr_base_register */ |
81 | dma_addr_t pdr_pa; /* physical address used to | 91 | struct ce_gd *gdr; /* gather descriptor ring */ |
82 | program ce pdr_base_register */ | 92 | dma_addr_t gdr_pa; /* physical address of gdr_base_register */ |
83 | void *gdr; /* gather descriptor ring */ | 93 | struct ce_sd *sdr; /* scatter descriptor ring */ |
84 | dma_addr_t gdr_pa; /* physical address used to | 94 | dma_addr_t sdr_pa; /* physical address of sdr_base_register */ |
85 | program ce gdr_base_register */ | ||
86 | void *sdr; /* scatter descriptor ring */ | ||
87 | dma_addr_t sdr_pa; /* physical address used to | ||
88 | program ce sdr_base_register */ | ||
89 | void *scatter_buffer_va; | 95 | void *scatter_buffer_va; |
90 | dma_addr_t scatter_buffer_pa; | 96 | dma_addr_t scatter_buffer_pa; |
91 | u32 scatter_buffer_size; | ||
92 | 97 | ||
93 | void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */ | 98 | union shadow_sa_buf *shadow_sa_pool; |
94 | dma_addr_t shadow_sa_pool_pa; | 99 | dma_addr_t shadow_sa_pool_pa; |
95 | void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */ | 100 | struct sa_state_record *shadow_sr_pool; |
96 | dma_addr_t shadow_sr_pool_pa; | 101 | dma_addr_t shadow_sr_pool_pa; |
97 | u32 pdr_tail; | 102 | u32 pdr_tail; |
98 | u32 pdr_head; | 103 | u32 pdr_head; |
@@ -100,9 +105,10 @@ struct crypto4xx_device { | |||
100 | u32 gdr_head; | 105 | u32 gdr_head; |
101 | u32 sdr_tail; | 106 | u32 sdr_tail; |
102 | u32 sdr_head; | 107 | u32 sdr_head; |
103 | void *pdr_uinfo; | 108 | struct pd_uinfo *pdr_uinfo; |
104 | struct list_head alg_list; /* List of algorithms supported | 109 | struct list_head alg_list; /* List of algorithms supported |
105 | by this device */ | 110 | by this device */ |
111 | struct ratelimit_state aead_ratelimit; | ||
106 | }; | 112 | }; |
107 | 113 | ||
108 | struct crypto4xx_core_device { | 114 | struct crypto4xx_core_device { |
@@ -118,30 +124,13 @@ struct crypto4xx_core_device { | |||
118 | 124 | ||
119 | struct crypto4xx_ctx { | 125 | struct crypto4xx_ctx { |
120 | struct crypto4xx_device *dev; | 126 | struct crypto4xx_device *dev; |
121 | void *sa_in; | 127 | struct dynamic_sa_ctl *sa_in; |
122 | dma_addr_t sa_in_dma_addr; | 128 | struct dynamic_sa_ctl *sa_out; |
123 | void *sa_out; | 129 | __le32 iv_nonce; |
124 | dma_addr_t sa_out_dma_addr; | ||
125 | void *state_record; | ||
126 | dma_addr_t state_record_dma_addr; | ||
127 | u32 sa_len; | 130 | u32 sa_len; |
128 | u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */ | 131 | union { |
129 | u32 direction; | 132 | struct crypto_aead *aead; |
130 | u32 next_hdr; | 133 | } sw_cipher; |
131 | u32 save_iv; | ||
132 | u32 pd_ctl_len; | ||
133 | u32 pd_ctl; | ||
134 | u32 bypass; | ||
135 | u32 is_hash; | ||
136 | u32 hash_final; | ||
137 | }; | ||
138 | |||
139 | struct crypto4xx_req_ctx { | ||
140 | struct crypto4xx_device *dev; /* Device in which | ||
141 | operation to send to */ | ||
142 | void *sa; | ||
143 | u32 sa_dma_addr; | ||
144 | u16 sa_len; | ||
145 | }; | 134 | }; |
146 | 135 | ||
147 | struct crypto4xx_alg_common { | 136 | struct crypto4xx_alg_common { |
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common { | |||
149 | union { | 138 | union { |
150 | struct crypto_alg cipher; | 139 | struct crypto_alg cipher; |
151 | struct ahash_alg hash; | 140 | struct ahash_alg hash; |
141 | struct aead_alg aead; | ||
152 | } u; | 142 | } u; |
153 | }; | 143 | }; |
154 | 144 | ||
@@ -158,43 +148,90 @@ struct crypto4xx_alg { | |||
158 | struct crypto4xx_device *dev; | 148 | struct crypto4xx_device *dev; |
159 | }; | 149 | }; |
160 | 150 | ||
161 | static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg( | 151 | int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); |
162 | struct crypto_alg *x) | 152 | void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); |
153 | void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); | ||
154 | int crypto4xx_build_pd(struct crypto_async_request *req, | ||
155 | struct crypto4xx_ctx *ctx, | ||
156 | struct scatterlist *src, | ||
157 | struct scatterlist *dst, | ||
158 | const unsigned int datalen, | ||
159 | const __le32 *iv, const u32 iv_len, | ||
160 | const struct dynamic_sa_ctl *sa, | ||
161 | const unsigned int sa_len, | ||
162 | const unsigned int assoclen); | ||
163 | int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, | ||
164 | const u8 *key, unsigned int keylen); | ||
165 | int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, | ||
166 | const u8 *key, unsigned int keylen); | ||
167 | int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher, | ||
168 | const u8 *key, unsigned int keylen); | ||
169 | int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, | ||
170 | const u8 *key, unsigned int keylen); | ||
171 | int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, | ||
172 | const u8 *key, unsigned int keylen); | ||
173 | int crypto4xx_encrypt(struct ablkcipher_request *req); | ||
174 | int crypto4xx_decrypt(struct ablkcipher_request *req); | ||
175 | int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req); | ||
176 | int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req); | ||
177 | int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); | ||
178 | int crypto4xx_hash_digest(struct ahash_request *req); | ||
179 | int crypto4xx_hash_final(struct ahash_request *req); | ||
180 | int crypto4xx_hash_update(struct ahash_request *req); | ||
181 | int crypto4xx_hash_init(struct ahash_request *req); | ||
182 | |||
183 | /** | ||
184 | * Note: Only use this function to copy items that is word aligned. | ||
185 | */ | ||
186 | static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf, | ||
187 | size_t len) | ||
163 | { | 188 | { |
164 | switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 189 | for (; len >= 4; buf += 4, len -= 4) |
165 | case CRYPTO_ALG_TYPE_AHASH: | 190 | *dst++ = __swab32p((u32 *) buf); |
166 | return container_of(__crypto_ahash_alg(x), | 191 | |
167 | struct crypto4xx_alg, alg.u.hash); | 192 | if (len) { |
193 | const u8 *tmp = (u8 *)buf; | ||
194 | |||
195 | switch (len) { | ||
196 | case 3: | ||
197 | *dst = (tmp[2] << 16) | | ||
198 | (tmp[1] << 8) | | ||
199 | tmp[0]; | ||
200 | break; | ||
201 | case 2: | ||
202 | *dst = (tmp[1] << 8) | | ||
203 | tmp[0]; | ||
204 | break; | ||
205 | case 1: | ||
206 | *dst = tmp[0]; | ||
207 | break; | ||
208 | default: | ||
209 | break; | ||
210 | } | ||
168 | } | 211 | } |
212 | } | ||
169 | 213 | ||
170 | return container_of(x, struct crypto4xx_alg, alg.u.cipher); | 214 | static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf, |
215 | size_t len) | ||
216 | { | ||
217 | crypto4xx_memcpy_swab32(dst, buf, len); | ||
171 | } | 218 | } |
172 | 219 | ||
173 | extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); | 220 | static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf, |
174 | extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); | 221 | size_t len) |
175 | extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx, | 222 | { |
176 | struct crypto4xx_ctx *rctx); | 223 | crypto4xx_memcpy_swab32((u32 *)dst, buf, len); |
177 | extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx); | 224 | } |
178 | extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); | 225 | |
179 | extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx); | 226 | int crypto4xx_setauthsize_aead(struct crypto_aead *ciper, |
180 | extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx); | 227 | unsigned int authsize); |
181 | extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx); | 228 | int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, |
182 | extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx); | 229 | const u8 *key, unsigned int keylen); |
183 | extern void crypto4xx_memcpy_le(unsigned int *dst, | 230 | int crypto4xx_encrypt_aes_ccm(struct aead_request *req); |
184 | const unsigned char *buf, int len); | 231 | int crypto4xx_decrypt_aes_ccm(struct aead_request *req); |
185 | extern u32 crypto4xx_build_pd(struct crypto_async_request *req, | 232 | int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, |
186 | struct crypto4xx_ctx *ctx, | 233 | const u8 *key, unsigned int keylen); |
187 | struct scatterlist *src, | 234 | int crypto4xx_encrypt_aes_gcm(struct aead_request *req); |
188 | struct scatterlist *dst, | 235 | int crypto4xx_decrypt_aes_gcm(struct aead_request *req); |
189 | unsigned int datalen, | 236 | |
190 | void *iv, u32 iv_len); | ||
191 | extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, | ||
192 | const u8 *key, unsigned int keylen); | ||
193 | extern int crypto4xx_encrypt(struct ablkcipher_request *req); | ||
194 | extern int crypto4xx_decrypt(struct ablkcipher_request *req); | ||
195 | extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); | ||
196 | extern int crypto4xx_hash_digest(struct ahash_request *req); | ||
197 | extern int crypto4xx_hash_final(struct ahash_request *req); | ||
198 | extern int crypto4xx_hash_update(struct ahash_request *req); | ||
199 | extern int crypto4xx_hash_init(struct ahash_request *req); | ||
200 | #endif | 237 | #endif |
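The byte-swapping copy helpers above exist because the packet engine expects SA key words byte-swapped relative to the big-endian 4xx CPU. A usage sketch, combining them with the SA accessor added in the crypto4xx_sa.h hunk below:

    /* Load a raw key into the dynamic SA: each 32-bit word is swabbed on
     * the way in; a 1-3 byte tail is packed as the switch above shows. */
    crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(ctx->sa_in),
                             key, keylen);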
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h index 279b8725559f..0a22ec5d1a96 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h | |||
@@ -261,6 +261,9 @@ union ce_pd_ctl { | |||
261 | } bf; | 261 | } bf; |
262 | u32 w; | 262 | u32 w; |
263 | } __attribute__((packed)); | 263 | } __attribute__((packed)); |
264 | #define PD_CTL_HASH_FINAL BIT(4) | ||
265 | #define PD_CTL_PE_DONE BIT(1) | ||
266 | #define PD_CTL_HOST_READY BIT(0) | ||
264 | 267 | ||
265 | union ce_pd_ctl_len { | 268 | union ce_pd_ctl_len { |
266 | struct { | 269 | struct { |
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c deleted file mode 100644 index 69182e2cc3ea..000000000000 --- a/drivers/crypto/amcc/crypto4xx_sa.c +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /** | ||
2 | * AMCC SoC PPC4xx Crypto Driver | ||
3 | * | ||
4 | * Copyright (c) 2008 Applied Micro Circuits Corporation. | ||
5 | * All rights reserved. James Hsiao <jhsiao@amcc.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * @file crypto4xx_sa.c | ||
18 | * | ||
19 | * This file implements the security context | ||
20 | * associate format. | ||
21 | */ | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/mod_devicetable.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/spinlock_types.h> | ||
28 | #include <linux/highmem.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | #include <linux/crypto.h> | ||
31 | #include <crypto/algapi.h> | ||
32 | #include <crypto/des.h> | ||
33 | #include "crypto4xx_reg_def.h" | ||
34 | #include "crypto4xx_sa.h" | ||
35 | #include "crypto4xx_core.h" | ||
36 | |||
37 | u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) | ||
38 | { | ||
39 | u32 offset; | ||
40 | union dynamic_sa_contents cts; | ||
41 | |||
42 | if (ctx->direction == DIR_INBOUND) | ||
43 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; | ||
44 | else | ||
45 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; | ||
46 | offset = cts.bf.key_size | ||
47 | + cts.bf.inner_size | ||
48 | + cts.bf.outer_size | ||
49 | + cts.bf.spi | ||
50 | + cts.bf.seq_num0 | ||
51 | + cts.bf.seq_num1 | ||
52 | + cts.bf.seq_num_mask0 | ||
53 | + cts.bf.seq_num_mask1 | ||
54 | + cts.bf.seq_num_mask2 | ||
55 | + cts.bf.seq_num_mask3 | ||
56 | + cts.bf.iv0 | ||
57 | + cts.bf.iv1 | ||
58 | + cts.bf.iv2 | ||
59 | + cts.bf.iv3; | ||
60 | |||
61 | return sizeof(struct dynamic_sa_ctl) + offset * 4; | ||
62 | } | ||
63 | |||
64 | u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx) | ||
65 | { | ||
66 | union dynamic_sa_contents cts; | ||
67 | |||
68 | if (ctx->direction == DIR_INBOUND) | ||
69 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; | ||
70 | else | ||
71 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; | ||
72 | return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4; | ||
73 | } | ||
74 | |||
75 | u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx) | ||
76 | { | ||
77 | union dynamic_sa_contents cts; | ||
78 | |||
79 | if (ctx->direction == DIR_INBOUND) | ||
80 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; | ||
81 | else | ||
82 | cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; | ||
83 | |||
84 | return sizeof(struct dynamic_sa_ctl); | ||
85 | } | ||
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h index 1352d58d4e34..a4d403528db5 100644 --- a/drivers/crypto/amcc/crypto4xx_sa.h +++ b/drivers/crypto/amcc/crypto4xx_sa.h | |||
@@ -55,6 +55,8 @@ union dynamic_sa_contents { | |||
55 | #define SA_OP_GROUP_BASIC 0 | 55 | #define SA_OP_GROUP_BASIC 0 |
56 | #define SA_OPCODE_ENCRYPT 0 | 56 | #define SA_OPCODE_ENCRYPT 0 |
57 | #define SA_OPCODE_DECRYPT 0 | 57 | #define SA_OPCODE_DECRYPT 0 |
58 | #define SA_OPCODE_ENCRYPT_HASH 1 | ||
59 | #define SA_OPCODE_HASH_DECRYPT 1 | ||
58 | #define SA_OPCODE_HASH 3 | 60 | #define SA_OPCODE_HASH 3 |
59 | #define SA_CIPHER_ALG_DES 0 | 61 | #define SA_CIPHER_ALG_DES 0 |
60 | #define SA_CIPHER_ALG_3DES 1 | 62 | #define SA_CIPHER_ALG_3DES 1 |
@@ -65,6 +67,8 @@ union dynamic_sa_contents { | |||
65 | 67 | ||
66 | #define SA_HASH_ALG_MD5 0 | 68 | #define SA_HASH_ALG_MD5 0 |
67 | #define SA_HASH_ALG_SHA1 1 | 69 | #define SA_HASH_ALG_SHA1 1 |
70 | #define SA_HASH_ALG_GHASH 12 | ||
71 | #define SA_HASH_ALG_CBC_MAC 14 | ||
68 | #define SA_HASH_ALG_NULL 15 | 72 | #define SA_HASH_ALG_NULL 15 |
69 | #define SA_HASH_ALG_SHA1_DIGEST_SIZE 20 | 73 | #define SA_HASH_ALG_SHA1_DIGEST_SIZE 20 |
70 | 74 | ||
@@ -112,6 +116,9 @@ union sa_command_0 { | |||
112 | 116 | ||
113 | #define CRYPTO_MODE_ECB 0 | 117 | #define CRYPTO_MODE_ECB 0 |
114 | #define CRYPTO_MODE_CBC 1 | 118 | #define CRYPTO_MODE_CBC 1 |
119 | #define CRYPTO_MODE_OFB 2 | ||
120 | #define CRYPTO_MODE_CFB 3 | ||
121 | #define CRYPTO_MODE_CTR 4 | ||
115 | 122 | ||
116 | #define CRYPTO_FEEDBACK_MODE_NO_FB 0 | 123 | #define CRYPTO_FEEDBACK_MODE_NO_FB 0 |
117 | #define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0 | 124 | #define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0 |
@@ -169,7 +176,7 @@ union sa_command_1 { | |||
169 | } __attribute__((packed)); | 176 | } __attribute__((packed)); |
170 | 177 | ||
171 | struct dynamic_sa_ctl { | 178 | struct dynamic_sa_ctl { |
172 | u32 sa_contents; | 179 | union dynamic_sa_contents sa_contents; |
173 | union sa_command_0 sa_command_0; | 180 | union sa_command_0 sa_command_0; |
174 | union sa_command_1 sa_command_1; | 181 | union sa_command_1 sa_command_1; |
175 | } __attribute__((packed)); | 182 | } __attribute__((packed)); |
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl { | |||
178 | * State Record for Security Association (SA) | 185 | * State Record for Security Association (SA) |
179 | */ | 186 | */ |
180 | struct sa_state_record { | 187 | struct sa_state_record { |
181 | u32 save_iv[4]; | 188 | __le32 save_iv[4]; |
182 | u32 save_hash_byte_cnt[2]; | 189 | __le32 save_hash_byte_cnt[2]; |
183 | u32 save_digest[16]; | 190 | union { |
191 | u32 save_digest[16]; /* for MD5/SHA */ | ||
192 | __le32 save_digest_le32[16]; /* GHASH / CBC */ | ||
193 | }; | ||
184 | } __attribute__((packed)); | 194 | } __attribute__((packed)); |
185 | 195 | ||
186 | /** | 196 | /** |
@@ -189,8 +199,8 @@ struct sa_state_record { | |||
189 | */ | 199 | */ |
190 | struct dynamic_sa_aes128 { | 200 | struct dynamic_sa_aes128 { |
191 | struct dynamic_sa_ctl ctrl; | 201 | struct dynamic_sa_ctl ctrl; |
192 | u32 key[4]; | 202 | __le32 key[4]; |
193 | u32 iv[4]; /* for CBC, OFB, and CFB mode */ | 203 | __le32 iv[4]; /* for CBC, OFB, and CFB mode */ |
194 | u32 state_ptr; | 204 | u32 state_ptr; |
195 | u32 reserved; | 205 | u32 reserved; |
196 | } __attribute__((packed)); | 206 | } __attribute__((packed)); |
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 { | |||
203 | */ | 213 | */ |
204 | struct dynamic_sa_aes192 { | 214 | struct dynamic_sa_aes192 { |
205 | struct dynamic_sa_ctl ctrl; | 215 | struct dynamic_sa_ctl ctrl; |
206 | u32 key[6]; | 216 | __le32 key[6]; |
207 | u32 iv[4]; /* for CBC, OFB, and CFB mode */ | 217 | __le32 iv[4]; /* for CBC, OFB, and CFB mode */ |
208 | u32 state_ptr; | 218 | u32 state_ptr; |
209 | u32 reserved; | 219 | u32 reserved; |
210 | } __attribute__((packed)); | 220 | } __attribute__((packed)); |
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 { | |||
217 | */ | 227 | */ |
218 | struct dynamic_sa_aes256 { | 228 | struct dynamic_sa_aes256 { |
219 | struct dynamic_sa_ctl ctrl; | 229 | struct dynamic_sa_ctl ctrl; |
220 | u32 key[8]; | 230 | __le32 key[8]; |
221 | u32 iv[4]; /* for CBC, OFB, and CFB mode */ | 231 | __le32 iv[4]; /* for CBC, OFB, and CFB mode */ |
222 | u32 state_ptr; | 232 | u32 state_ptr; |
223 | u32 reserved; | 233 | u32 reserved; |
224 | } __attribute__((packed)); | 234 | } __attribute__((packed)); |
@@ -228,16 +238,81 @@ struct dynamic_sa_aes256 { | |||
228 | #define SA_AES_CONTENTS 0x3e000002 | 238 | #define SA_AES_CONTENTS 0x3e000002 |
229 | 239 | ||
230 | /** | 240 | /** |
241 | * Security Association (SA) for AES128 CCM | ||
242 | */ | ||
243 | struct dynamic_sa_aes128_ccm { | ||
244 | struct dynamic_sa_ctl ctrl; | ||
245 | __le32 key[4]; | ||
246 | __le32 iv[4]; | ||
247 | u32 state_ptr; | ||
248 | u32 reserved; | ||
249 | } __packed; | ||
250 | #define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4) | ||
251 | #define SA_AES128_CCM_CONTENTS 0x3e000042 | ||
252 | #define SA_AES_CCM_CONTENTS 0x3e000002 | ||
253 | |||
254 | /** | ||
255 | * Security Association (SA) for AES128_GCM | ||
256 | */ | ||
257 | struct dynamic_sa_aes128_gcm { | ||
258 | struct dynamic_sa_ctl ctrl; | ||
259 | __le32 key[4]; | ||
260 | __le32 inner_digest[4]; | ||
261 | __le32 iv[4]; | ||
262 | u32 state_ptr; | ||
263 | u32 reserved; | ||
264 | } __packed; | ||
265 | |||
266 | #define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4) | ||
267 | #define SA_AES128_GCM_CONTENTS 0x3e000442 | ||
268 | #define SA_AES_GCM_CONTENTS 0x3e000402 | ||
269 | |||
270 | /** | ||
231 | * Security Association (SA) for HASH160: HMAC-SHA1 | 271 | * Security Association (SA) for HASH160: HMAC-SHA1 |
232 | */ | 272 | */ |
233 | struct dynamic_sa_hash160 { | 273 | struct dynamic_sa_hash160 { |
234 | struct dynamic_sa_ctl ctrl; | 274 | struct dynamic_sa_ctl ctrl; |
235 | u32 inner_digest[5]; | 275 | __le32 inner_digest[5]; |
236 | u32 outer_digest[5]; | 276 | __le32 outer_digest[5]; |
237 | u32 state_ptr; | 277 | u32 state_ptr; |
238 | u32 reserved; | 278 | u32 reserved; |
239 | } __attribute__((packed)); | 279 | } __attribute__((packed)); |
240 | #define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4) | 280 | #define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4) |
241 | #define SA_HASH160_CONTENTS 0x2000a502 | 281 | #define SA_HASH160_CONTENTS 0x2000a502 |
242 | 282 | ||
283 | static inline u32 | ||
284 | get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts) | ||
285 | { | ||
286 | u32 offset; | ||
287 | |||
288 | offset = cts->sa_contents.bf.key_size | ||
289 | + cts->sa_contents.bf.inner_size | ||
290 | + cts->sa_contents.bf.outer_size | ||
291 | + cts->sa_contents.bf.spi | ||
292 | + cts->sa_contents.bf.seq_num0 | ||
293 | + cts->sa_contents.bf.seq_num1 | ||
294 | + cts->sa_contents.bf.seq_num_mask0 | ||
295 | + cts->sa_contents.bf.seq_num_mask1 | ||
296 | + cts->sa_contents.bf.seq_num_mask2 | ||
297 | + cts->sa_contents.bf.seq_num_mask3 | ||
298 | + cts->sa_contents.bf.iv0 | ||
299 | + cts->sa_contents.bf.iv1 | ||
300 | + cts->sa_contents.bf.iv2 | ||
301 | + cts->sa_contents.bf.iv3; | ||
302 | |||
303 | return sizeof(struct dynamic_sa_ctl) + offset * 4; | ||
304 | } | ||
305 | |||
306 | static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts) | ||
307 | { | ||
308 | return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl)); | ||
309 | } | ||
310 | |||
311 | static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts) | ||
312 | { | ||
313 | return (__le32 *) ((unsigned long)cts + | ||
314 | sizeof(struct dynamic_sa_ctl) + | ||
315 | cts->sa_contents.bf.key_size * 4); | ||
316 | } | ||
317 | |||
243 | #endif | 318 | #endif |
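get_dynamic_sa_offset_state_ptr_field() above sums the sa_contents bitfields to find where the variable-length part of the SA ends; the state-pointer word lives immediately after it. A hedged sketch of the consumer side:

    /* Patch the state_ptr word of a dynamic SA to point at the DMA
     * address of its state record (variable names illustrative). */
    u32 *state_ptr = (u32 *)((u8 *)sa +
                     get_dynamic_sa_offset_state_ptr_field(sa));

    *state_ptr = (u32)sr_dma_addr;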
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 29e20c37f3a6..691c6465b71e 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <crypto/scatterwalk.h> | 36 | #include <crypto/scatterwalk.h> |
37 | #include <crypto/algapi.h> | 37 | #include <crypto/algapi.h> |
38 | #include <crypto/aes.h> | 38 | #include <crypto/aes.h> |
39 | #include <crypto/gcm.h> | ||
39 | #include <crypto/xts.h> | 40 | #include <crypto/xts.h> |
40 | #include <crypto/internal/aead.h> | 41 | #include <crypto/internal/aead.h> |
41 | #include <linux/platform_data/crypto-atmel.h> | 42 | #include <linux/platform_data/crypto-atmel.h> |
@@ -76,12 +77,11 @@ | |||
76 | AES_FLAGS_ENCRYPT | \ | 77 | AES_FLAGS_ENCRYPT | \ |
77 | AES_FLAGS_GTAGEN) | 78 | AES_FLAGS_GTAGEN) |
78 | 79 | ||
79 | #define AES_FLAGS_INIT BIT(2) | ||
80 | #define AES_FLAGS_BUSY BIT(3) | 80 | #define AES_FLAGS_BUSY BIT(3) |
81 | #define AES_FLAGS_DUMP_REG BIT(4) | 81 | #define AES_FLAGS_DUMP_REG BIT(4) |
82 | #define AES_FLAGS_OWN_SHA BIT(5) | 82 | #define AES_FLAGS_OWN_SHA BIT(5) |
83 | 83 | ||
84 | #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) | 84 | #define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY |
85 | 85 | ||
86 | #define ATMEL_AES_QUEUE_LENGTH 50 | 86 | #define ATMEL_AES_QUEUE_LENGTH 50 |
87 | 87 | ||
@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx { | |||
110 | int keylen; | 110 | int keylen; |
111 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | 111 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; |
112 | u16 block_size; | 112 | u16 block_size; |
113 | bool is_aead; | ||
113 | }; | 114 | }; |
114 | 115 | ||
115 | struct atmel_aes_ctx { | 116 | struct atmel_aes_ctx { |
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx { | |||
156 | 157 | ||
157 | struct atmel_aes_reqctx { | 158 | struct atmel_aes_reqctx { |
158 | unsigned long mode; | 159 | unsigned long mode; |
160 | u32 lastc[AES_BLOCK_SIZE / sizeof(u32)]; | ||
159 | }; | 161 | }; |
160 | 162 | ||
161 | #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC | 163 | #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC |
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd) | |||
448 | if (err) | 450 | if (err) |
449 | return err; | 451 | return err; |
450 | 452 | ||
451 | if (!(dd->flags & AES_FLAGS_INIT)) { | 453 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); |
452 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); | 454 | atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); |
453 | atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); | ||
454 | dd->flags |= AES_FLAGS_INIT; | ||
455 | } | ||
456 | 455 | ||
457 | return 0; | 456 | return 0; |
458 | } | 457 | } |
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); | |||
497 | static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) | 496 | static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) |
498 | { | 497 | { |
499 | #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC | 498 | #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC |
500 | atmel_aes_authenc_complete(dd, err); | 499 | if (dd->ctx->is_aead) |
500 | atmel_aes_authenc_complete(dd, err); | ||
501 | #endif | 501 | #endif |
502 | 502 | ||
503 | clk_disable(dd->iclk); | 503 | clk_disable(dd->iclk); |
504 | dd->flags &= ~AES_FLAGS_BUSY; | 504 | dd->flags &= ~AES_FLAGS_BUSY; |
505 | 505 | ||
506 | if (!dd->ctx->is_aead) { | ||
507 | struct ablkcipher_request *req = | ||
508 | ablkcipher_request_cast(dd->areq); | ||
509 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
510 | struct crypto_ablkcipher *ablkcipher = | ||
511 | crypto_ablkcipher_reqtfm(req); | ||
512 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
513 | |||
514 | if (rctx->mode & AES_FLAGS_ENCRYPT) { | ||
515 | scatterwalk_map_and_copy(req->info, req->dst, | ||
516 | req->nbytes - ivsize, ivsize, 0); | ||
517 | } else { | ||
518 | if (req->src == req->dst) { | ||
519 | memcpy(req->info, rctx->lastc, ivsize); | ||
520 | } else { | ||
521 | scatterwalk_map_and_copy(req->info, req->src, | ||
522 | req->nbytes - ivsize, ivsize, 0); | ||
523 | } | ||
524 | } | ||
525 | } | ||
526 | |||
506 | if (dd->is_async) | 527 | if (dd->is_async) |
507 | dd->areq->complete(dd->areq, err); | 528 | dd->areq->complete(dd->areq, err); |
508 | 529 | ||
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd) | |||
1071 | 1092 | ||
1072 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | 1093 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) |
1073 | { | 1094 | { |
1074 | struct atmel_aes_base_ctx *ctx; | 1095 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
1096 | struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1075 | struct atmel_aes_reqctx *rctx; | 1097 | struct atmel_aes_reqctx *rctx; |
1076 | struct atmel_aes_dev *dd; | 1098 | struct atmel_aes_dev *dd; |
1077 | 1099 | ||
1078 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
1079 | switch (mode & AES_FLAGS_OPMODE_MASK) { | 1100 | switch (mode & AES_FLAGS_OPMODE_MASK) { |
1080 | case AES_FLAGS_CFB8: | 1101 | case AES_FLAGS_CFB8: |
1081 | ctx->block_size = CFB8_BLOCK_SIZE; | 1102 | ctx->block_size = CFB8_BLOCK_SIZE; |
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
1097 | ctx->block_size = AES_BLOCK_SIZE; | 1118 | ctx->block_size = AES_BLOCK_SIZE; |
1098 | break; | 1119 | break; |
1099 | } | 1120 | } |
1121 | ctx->is_aead = false; | ||
1100 | 1122 | ||
1101 | dd = atmel_aes_find_dev(ctx); | 1123 | dd = atmel_aes_find_dev(ctx); |
1102 | if (!dd) | 1124 | if (!dd) |
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
1105 | rctx = ablkcipher_request_ctx(req); | 1127 | rctx = ablkcipher_request_ctx(req); |
1106 | rctx->mode = mode; | 1128 | rctx->mode = mode; |
1107 | 1129 | ||
1130 | if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { | ||
1131 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1132 | |||
1133 | scatterwalk_map_and_copy(rctx->lastc, req->src, | ||
1134 | (req->nbytes - ivsize), ivsize, 0); | ||
1135 | } | ||
1136 | |||
1108 | return atmel_aes_handle_queue(dd, &req->base); | 1137 | return atmel_aes_handle_queue(dd, &req->base); |
1109 | } | 1138 | } |
1110 | 1139 | ||
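Together, these two atmel-aes hunks implement the "properly set IV after {en,de}crypt" fix: for chaining modes the next IV is the last ciphertext block, and an in-place decrypt destroys that block before completion. Hence the stash-and-restore pattern, in sketch form (condition name illustrative):

    /* Before an in-place decrypt: save the final ciphertext block,
     * since the engine will overwrite it with plaintext. */
    scatterwalk_map_and_copy(rctx->lastc, req->src,
                             req->nbytes - ivsize, ivsize, 0);

    /* On completion: encrypt can read the IV back from dst; an
     * in-place decrypt restores it from the stashed copy. */
    if (encrypting)
            scatterwalk_map_and_copy(req->info, req->dst,
                                     req->nbytes - ivsize, ivsize, 0);
    else if (req->src == req->dst)
            memcpy(req->info, rctx->lastc, ivsize);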
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm) | |||
1236 | return 0; | 1265 | return 0; |
1237 | } | 1266 | } |
1238 | 1267 | ||
1239 | static void atmel_aes_cra_exit(struct crypto_tfm *tfm) | ||
1240 | { | ||
1241 | } | ||
1242 | |||
1243 | static struct crypto_alg aes_algs[] = { | 1268 | static struct crypto_alg aes_algs[] = { |
1244 | { | 1269 | { |
1245 | .cra_name = "ecb(aes)", | 1270 | .cra_name = "ecb(aes)", |
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = { | |||
1252 | .cra_type = &crypto_ablkcipher_type, | 1277 | .cra_type = &crypto_ablkcipher_type, |
1253 | .cra_module = THIS_MODULE, | 1278 | .cra_module = THIS_MODULE, |
1254 | .cra_init = atmel_aes_cra_init, | 1279 | .cra_init = atmel_aes_cra_init, |
1255 | .cra_exit = atmel_aes_cra_exit, | ||
1256 | .cra_u.ablkcipher = { | 1280 | .cra_u.ablkcipher = { |
1257 | .min_keysize = AES_MIN_KEY_SIZE, | 1281 | .min_keysize = AES_MIN_KEY_SIZE, |
1258 | .max_keysize = AES_MAX_KEY_SIZE, | 1282 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = { | |||
1272 | .cra_type = &crypto_ablkcipher_type, | 1296 | .cra_type = &crypto_ablkcipher_type, |
1273 | .cra_module = THIS_MODULE, | 1297 | .cra_module = THIS_MODULE, |
1274 | .cra_init = atmel_aes_cra_init, | 1298 | .cra_init = atmel_aes_cra_init, |
1275 | .cra_exit = atmel_aes_cra_exit, | ||
1276 | .cra_u.ablkcipher = { | 1299 | .cra_u.ablkcipher = { |
1277 | .min_keysize = AES_MIN_KEY_SIZE, | 1300 | .min_keysize = AES_MIN_KEY_SIZE, |
1278 | .max_keysize = AES_MAX_KEY_SIZE, | 1301 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = { | |||
1293 | .cra_type = &crypto_ablkcipher_type, | 1316 | .cra_type = &crypto_ablkcipher_type, |
1294 | .cra_module = THIS_MODULE, | 1317 | .cra_module = THIS_MODULE, |
1295 | .cra_init = atmel_aes_cra_init, | 1318 | .cra_init = atmel_aes_cra_init, |
1296 | .cra_exit = atmel_aes_cra_exit, | ||
1297 | .cra_u.ablkcipher = { | 1319 | .cra_u.ablkcipher = { |
1298 | .min_keysize = AES_MIN_KEY_SIZE, | 1320 | .min_keysize = AES_MIN_KEY_SIZE, |
1299 | .max_keysize = AES_MAX_KEY_SIZE, | 1321 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = { | |||
1314 | .cra_type = &crypto_ablkcipher_type, | 1336 | .cra_type = &crypto_ablkcipher_type, |
1315 | .cra_module = THIS_MODULE, | 1337 | .cra_module = THIS_MODULE, |
1316 | .cra_init = atmel_aes_cra_init, | 1338 | .cra_init = atmel_aes_cra_init, |
1317 | .cra_exit = atmel_aes_cra_exit, | ||
1318 | .cra_u.ablkcipher = { | 1339 | .cra_u.ablkcipher = { |
1319 | .min_keysize = AES_MIN_KEY_SIZE, | 1340 | .min_keysize = AES_MIN_KEY_SIZE, |
1320 | .max_keysize = AES_MAX_KEY_SIZE, | 1341 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = { | |||
1335 | .cra_type = &crypto_ablkcipher_type, | 1356 | .cra_type = &crypto_ablkcipher_type, |
1336 | .cra_module = THIS_MODULE, | 1357 | .cra_module = THIS_MODULE, |
1337 | .cra_init = atmel_aes_cra_init, | 1358 | .cra_init = atmel_aes_cra_init, |
1338 | .cra_exit = atmel_aes_cra_exit, | ||
1339 | .cra_u.ablkcipher = { | 1359 | .cra_u.ablkcipher = { |
1340 | .min_keysize = AES_MIN_KEY_SIZE, | 1360 | .min_keysize = AES_MIN_KEY_SIZE, |
1341 | .max_keysize = AES_MAX_KEY_SIZE, | 1361 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = { | |||
1356 | .cra_type = &crypto_ablkcipher_type, | 1376 | .cra_type = &crypto_ablkcipher_type, |
1357 | .cra_module = THIS_MODULE, | 1377 | .cra_module = THIS_MODULE, |
1358 | .cra_init = atmel_aes_cra_init, | 1378 | .cra_init = atmel_aes_cra_init, |
1359 | .cra_exit = atmel_aes_cra_exit, | ||
1360 | .cra_u.ablkcipher = { | 1379 | .cra_u.ablkcipher = { |
1361 | .min_keysize = AES_MIN_KEY_SIZE, | 1380 | .min_keysize = AES_MIN_KEY_SIZE, |
1362 | .max_keysize = AES_MAX_KEY_SIZE, | 1381 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = { | |||
1377 | .cra_type = &crypto_ablkcipher_type, | 1396 | .cra_type = &crypto_ablkcipher_type, |
1378 | .cra_module = THIS_MODULE, | 1397 | .cra_module = THIS_MODULE, |
1379 | .cra_init = atmel_aes_cra_init, | 1398 | .cra_init = atmel_aes_cra_init, |
1380 | .cra_exit = atmel_aes_cra_exit, | ||
1381 | .cra_u.ablkcipher = { | 1399 | .cra_u.ablkcipher = { |
1382 | .min_keysize = AES_MIN_KEY_SIZE, | 1400 | .min_keysize = AES_MIN_KEY_SIZE, |
1383 | .max_keysize = AES_MAX_KEY_SIZE, | 1401 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = { | |||
1398 | .cra_type = &crypto_ablkcipher_type, | 1416 | .cra_type = &crypto_ablkcipher_type, |
1399 | .cra_module = THIS_MODULE, | 1417 | .cra_module = THIS_MODULE, |
1400 | .cra_init = atmel_aes_ctr_cra_init, | 1418 | .cra_init = atmel_aes_ctr_cra_init, |
1401 | .cra_exit = atmel_aes_cra_exit, | ||
1402 | .cra_u.ablkcipher = { | 1419 | .cra_u.ablkcipher = { |
1403 | .min_keysize = AES_MIN_KEY_SIZE, | 1420 | .min_keysize = AES_MIN_KEY_SIZE, |
1404 | .max_keysize = AES_MAX_KEY_SIZE, | 1421 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = { | |||
1421 | .cra_type = &crypto_ablkcipher_type, | 1438 | .cra_type = &crypto_ablkcipher_type, |
1422 | .cra_module = THIS_MODULE, | 1439 | .cra_module = THIS_MODULE, |
1423 | .cra_init = atmel_aes_cra_init, | 1440 | .cra_init = atmel_aes_cra_init, |
1424 | .cra_exit = atmel_aes_cra_exit, | ||
1425 | .cra_u.ablkcipher = { | 1441 | .cra_u.ablkcipher = { |
1426 | .min_keysize = AES_MIN_KEY_SIZE, | 1442 | .min_keysize = AES_MIN_KEY_SIZE, |
1427 | .max_keysize = AES_MAX_KEY_SIZE, | 1443 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd) | |||
1532 | if (err) | 1548 | if (err) |
1533 | return atmel_aes_complete(dd, err); | 1549 | return atmel_aes_complete(dd, err); |
1534 | 1550 | ||
1535 | if (likely(ivsize == 12)) { | 1551 | if (likely(ivsize == GCM_AES_IV_SIZE)) { |
1536 | memcpy(ctx->j0, iv, ivsize); | 1552 | memcpy(ctx->j0, iv, ivsize); |
1537 | ctx->j0[3] = cpu_to_be32(1); | 1553 | ctx->j0[3] = cpu_to_be32(1); |
1538 | return atmel_aes_gcm_process(dd); | 1554 | return atmel_aes_gcm_process(dd); |
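The GCM_AES_IV_SIZE fast path above follows straight from the GCM spec: with a 96-bit IV, the initial counter block is J0 = IV || 0^31 || 1, so only the fourth big-endian word has to be written. Spelled out:

    /* Per NIST SP 800-38D: for a 12-byte IV, J0 is the IV with the
     * 32-bit big-endian value 1 appended. Other IV lengths need
     * J0 = GHASH_H(IV || pad || len64(IV)) and take the slow path. */
    memcpy(ctx->j0, iv, GCM_AES_IV_SIZE);
    ctx->j0[3] = cpu_to_be32(1);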
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req, | |||
1739 | 1755 | ||
1740 | ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 1756 | ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
1741 | ctx->block_size = AES_BLOCK_SIZE; | 1757 | ctx->block_size = AES_BLOCK_SIZE; |
1758 | ctx->is_aead = true; | ||
1742 | 1759 | ||
1743 | dd = atmel_aes_find_dev(ctx); | 1760 | dd = atmel_aes_find_dev(ctx); |
1744 | if (!dd) | 1761 | if (!dd) |
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm) | |||
1808 | return 0; | 1825 | return 0; |
1809 | } | 1826 | } |
1810 | 1827 | ||
1811 | static void atmel_aes_gcm_exit(struct crypto_aead *tfm) | ||
1812 | { | ||
1813 | |||
1814 | } | ||
1815 | |||
1816 | static struct aead_alg aes_gcm_alg = { | 1828 | static struct aead_alg aes_gcm_alg = { |
1817 | .setkey = atmel_aes_gcm_setkey, | 1829 | .setkey = atmel_aes_gcm_setkey, |
1818 | .setauthsize = atmel_aes_gcm_setauthsize, | 1830 | .setauthsize = atmel_aes_gcm_setauthsize, |
1819 | .encrypt = atmel_aes_gcm_encrypt, | 1831 | .encrypt = atmel_aes_gcm_encrypt, |
1820 | .decrypt = atmel_aes_gcm_decrypt, | 1832 | .decrypt = atmel_aes_gcm_decrypt, |
1821 | .init = atmel_aes_gcm_init, | 1833 | .init = atmel_aes_gcm_init, |
1822 | .exit = atmel_aes_gcm_exit, | 1834 | .ivsize = GCM_AES_IV_SIZE, |
1823 | .ivsize = 12, | ||
1824 | .maxauthsize = AES_BLOCK_SIZE, | 1835 | .maxauthsize = AES_BLOCK_SIZE, |
1825 | 1836 | ||
1826 | .base = { | 1837 | .base = { |
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = { | |||
1955 | .cra_type = &crypto_ablkcipher_type, | 1966 | .cra_type = &crypto_ablkcipher_type, |
1956 | .cra_module = THIS_MODULE, | 1967 | .cra_module = THIS_MODULE, |
1957 | .cra_init = atmel_aes_xts_cra_init, | 1968 | .cra_init = atmel_aes_xts_cra_init, |
1958 | .cra_exit = atmel_aes_cra_exit, | ||
1959 | .cra_u.ablkcipher = { | 1969 | .cra_u.ablkcipher = { |
1960 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | 1970 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1961 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | 1971 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req, | |||
2223 | 2233 | ||
2224 | rctx->base.mode = mode; | 2234 | rctx->base.mode = mode; |
2225 | ctx->block_size = AES_BLOCK_SIZE; | 2235 | ctx->block_size = AES_BLOCK_SIZE; |
2236 | ctx->is_aead = true; | ||
2226 | 2237 | ||
2227 | dd = atmel_aes_find_dev(ctx); | 2238 | dd = atmel_aes_find_dev(ctx); |
2228 | if (!dd) | 2239 | if (!dd) |
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd, | |||
2382 | struct crypto_platform_data *pdata) | 2393 | struct crypto_platform_data *pdata) |
2383 | { | 2394 | { |
2384 | struct at_dma_slave *slave; | 2395 | struct at_dma_slave *slave; |
2385 | int err = -ENOMEM; | ||
2386 | dma_cap_mask_t mask; | 2396 | dma_cap_mask_t mask; |
2387 | 2397 | ||
2388 | dma_cap_zero(mask); | 2398 | dma_cap_zero(mask); |
@@ -2407,7 +2417,7 @@ err_dma_out: | |||
2407 | dma_release_channel(dd->src.chan); | 2417 | dma_release_channel(dd->src.chan); |
2408 | err_dma_in: | 2418 | err_dma_in: |
2409 | dev_warn(dd->dev, "no DMA channel available\n"); | 2419 | dev_warn(dd->dev, "no DMA channel available\n"); |
2410 | return err; | 2420 | return -ENODEV; |
2411 | } | 2421 | } |
2412 | 2422 | ||
2413 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) | 2423 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) |
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev) | |||
2658 | 2668 | ||
2659 | crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); | 2669 | crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); |
2660 | 2670 | ||
2661 | aes_dd->irq = -1; | ||
2662 | |||
2663 | /* Get the base address */ | 2671 | /* Get the base address */ |
2664 | aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2672 | aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2665 | if (!aes_res) { | 2673 | if (!aes_res) { |
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 3e2f41b3eaf3..8874aa5ca0f7 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c | |||
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave) | |||
2628 | static int atmel_sha_dma_init(struct atmel_sha_dev *dd, | 2628 | static int atmel_sha_dma_init(struct atmel_sha_dev *dd, |
2629 | struct crypto_platform_data *pdata) | 2629 | struct crypto_platform_data *pdata) |
2630 | { | 2630 | { |
2631 | int err = -ENOMEM; | ||
2632 | dma_cap_mask_t mask_in; | 2631 | dma_cap_mask_t mask_in; |
2633 | 2632 | ||
2634 | /* Try to grab DMA channel */ | 2633 | /* Try to grab DMA channel */ |
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd, | |||
2639 | atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); | 2638 | atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); |
2640 | if (!dd->dma_lch_in.chan) { | 2639 | if (!dd->dma_lch_in.chan) { |
2641 | dev_warn(dd->dev, "no DMA channel available\n"); | 2640 | dev_warn(dd->dev, "no DMA channel available\n"); |
2642 | return err; | 2641 | return -ENODEV; |
2643 | } | 2642 | } |
2644 | 2643 | ||
2645 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | 2644 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; |
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
2778 | 2777 | ||
2779 | crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); | 2778 | crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); |
2780 | 2779 | ||
2781 | sha_dd->irq = -1; | ||
2782 | |||
2783 | /* Get the base address */ | 2780 | /* Get the base address */ |
2784 | sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2781 | sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2785 | if (!sha_res) { | 2782 | if (!sha_res) { |
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index f4b335dda568..592124f8382b 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c | |||
@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave) | |||
720 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, | 720 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, |
721 | struct crypto_platform_data *pdata) | 721 | struct crypto_platform_data *pdata) |
722 | { | 722 | { |
723 | int err = -ENOMEM; | ||
724 | dma_cap_mask_t mask; | 723 | dma_cap_mask_t mask; |
725 | 724 | ||
726 | dma_cap_zero(mask); | 725 | dma_cap_zero(mask); |
@@ -765,7 +764,7 @@ err_dma_out: | |||
765 | dma_release_channel(dd->dma_lch_in.chan); | 764 | dma_release_channel(dd->dma_lch_in.chan); |
766 | err_dma_in: | 765 | err_dma_in: |
767 | dev_warn(dd->dev, "no DMA channel available\n"); | 766 | dev_warn(dd->dev, "no DMA channel available\n"); |
768 | return err; | 767 | return -ENODEV; |
769 | } | 768 | } |
770 | 769 | ||
771 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) | 770 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) |
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm) | |||
912 | return 0; | 911 | return 0; |
913 | } | 912 | } |
914 | 913 | ||
915 | static void atmel_tdes_cra_exit(struct crypto_tfm *tfm) | ||
916 | { | ||
917 | } | ||
918 | |||
919 | static struct crypto_alg tdes_algs[] = { | 914 | static struct crypto_alg tdes_algs[] = { |
920 | { | 915 | { |
921 | .cra_name = "ecb(des)", | 916 | .cra_name = "ecb(des)", |
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = { | |||
928 | .cra_type = &crypto_ablkcipher_type, | 923 | .cra_type = &crypto_ablkcipher_type, |
929 | .cra_module = THIS_MODULE, | 924 | .cra_module = THIS_MODULE, |
930 | .cra_init = atmel_tdes_cra_init, | 925 | .cra_init = atmel_tdes_cra_init, |
931 | .cra_exit = atmel_tdes_cra_exit, | ||
932 | .cra_u.ablkcipher = { | 926 | .cra_u.ablkcipher = { |
933 | .min_keysize = DES_KEY_SIZE, | 927 | .min_keysize = DES_KEY_SIZE, |
934 | .max_keysize = DES_KEY_SIZE, | 928 | .max_keysize = DES_KEY_SIZE, |
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = { | |||
948 | .cra_type = &crypto_ablkcipher_type, | 942 | .cra_type = &crypto_ablkcipher_type, |
949 | .cra_module = THIS_MODULE, | 943 | .cra_module = THIS_MODULE, |
950 | .cra_init = atmel_tdes_cra_init, | 944 | .cra_init = atmel_tdes_cra_init, |
951 | .cra_exit = atmel_tdes_cra_exit, | ||
952 | .cra_u.ablkcipher = { | 945 | .cra_u.ablkcipher = { |
953 | .min_keysize = DES_KEY_SIZE, | 946 | .min_keysize = DES_KEY_SIZE, |
954 | .max_keysize = DES_KEY_SIZE, | 947 | .max_keysize = DES_KEY_SIZE, |
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = { | |||
969 | .cra_type = &crypto_ablkcipher_type, | 962 | .cra_type = &crypto_ablkcipher_type, |
970 | .cra_module = THIS_MODULE, | 963 | .cra_module = THIS_MODULE, |
971 | .cra_init = atmel_tdes_cra_init, | 964 | .cra_init = atmel_tdes_cra_init, |
972 | .cra_exit = atmel_tdes_cra_exit, | ||
973 | .cra_u.ablkcipher = { | 965 | .cra_u.ablkcipher = { |
974 | .min_keysize = DES_KEY_SIZE, | 966 | .min_keysize = DES_KEY_SIZE, |
975 | .max_keysize = DES_KEY_SIZE, | 967 | .max_keysize = DES_KEY_SIZE, |
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = { | |||
990 | .cra_type = &crypto_ablkcipher_type, | 982 | .cra_type = &crypto_ablkcipher_type, |
991 | .cra_module = THIS_MODULE, | 983 | .cra_module = THIS_MODULE, |
992 | .cra_init = atmel_tdes_cra_init, | 984 | .cra_init = atmel_tdes_cra_init, |
993 | .cra_exit = atmel_tdes_cra_exit, | ||
994 | .cra_u.ablkcipher = { | 985 | .cra_u.ablkcipher = { |
995 | .min_keysize = DES_KEY_SIZE, | 986 | .min_keysize = DES_KEY_SIZE, |
996 | .max_keysize = DES_KEY_SIZE, | 987 | .max_keysize = DES_KEY_SIZE, |
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1011 | .cra_type = &crypto_ablkcipher_type, | 1002 | .cra_type = &crypto_ablkcipher_type, |
1012 | .cra_module = THIS_MODULE, | 1003 | .cra_module = THIS_MODULE, |
1013 | .cra_init = atmel_tdes_cra_init, | 1004 | .cra_init = atmel_tdes_cra_init, |
1014 | .cra_exit = atmel_tdes_cra_exit, | ||
1015 | .cra_u.ablkcipher = { | 1005 | .cra_u.ablkcipher = { |
1016 | .min_keysize = DES_KEY_SIZE, | 1006 | .min_keysize = DES_KEY_SIZE, |
1017 | .max_keysize = DES_KEY_SIZE, | 1007 | .max_keysize = DES_KEY_SIZE, |
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1032 | .cra_type = &crypto_ablkcipher_type, | 1022 | .cra_type = &crypto_ablkcipher_type, |
1033 | .cra_module = THIS_MODULE, | 1023 | .cra_module = THIS_MODULE, |
1034 | .cra_init = atmel_tdes_cra_init, | 1024 | .cra_init = atmel_tdes_cra_init, |
1035 | .cra_exit = atmel_tdes_cra_exit, | ||
1036 | .cra_u.ablkcipher = { | 1025 | .cra_u.ablkcipher = { |
1037 | .min_keysize = DES_KEY_SIZE, | 1026 | .min_keysize = DES_KEY_SIZE, |
1038 | .max_keysize = DES_KEY_SIZE, | 1027 | .max_keysize = DES_KEY_SIZE, |
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1053 | .cra_type = &crypto_ablkcipher_type, | 1042 | .cra_type = &crypto_ablkcipher_type, |
1054 | .cra_module = THIS_MODULE, | 1043 | .cra_module = THIS_MODULE, |
1055 | .cra_init = atmel_tdes_cra_init, | 1044 | .cra_init = atmel_tdes_cra_init, |
1056 | .cra_exit = atmel_tdes_cra_exit, | ||
1057 | .cra_u.ablkcipher = { | 1045 | .cra_u.ablkcipher = { |
1058 | .min_keysize = DES_KEY_SIZE, | 1046 | .min_keysize = DES_KEY_SIZE, |
1059 | .max_keysize = DES_KEY_SIZE, | 1047 | .max_keysize = DES_KEY_SIZE, |
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1074 | .cra_type = &crypto_ablkcipher_type, | 1062 | .cra_type = &crypto_ablkcipher_type, |
1075 | .cra_module = THIS_MODULE, | 1063 | .cra_module = THIS_MODULE, |
1076 | .cra_init = atmel_tdes_cra_init, | 1064 | .cra_init = atmel_tdes_cra_init, |
1077 | .cra_exit = atmel_tdes_cra_exit, | ||
1078 | .cra_u.ablkcipher = { | 1065 | .cra_u.ablkcipher = { |
1079 | .min_keysize = 2 * DES_KEY_SIZE, | 1066 | .min_keysize = 2 * DES_KEY_SIZE, |
1080 | .max_keysize = 3 * DES_KEY_SIZE, | 1067 | .max_keysize = 3 * DES_KEY_SIZE, |
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1094 | .cra_type = &crypto_ablkcipher_type, | 1081 | .cra_type = &crypto_ablkcipher_type, |
1095 | .cra_module = THIS_MODULE, | 1082 | .cra_module = THIS_MODULE, |
1096 | .cra_init = atmel_tdes_cra_init, | 1083 | .cra_init = atmel_tdes_cra_init, |
1097 | .cra_exit = atmel_tdes_cra_exit, | ||
1098 | .cra_u.ablkcipher = { | 1084 | .cra_u.ablkcipher = { |
1099 | .min_keysize = 2*DES_KEY_SIZE, | 1085 | .min_keysize = 2*DES_KEY_SIZE, |
1100 | .max_keysize = 3*DES_KEY_SIZE, | 1086 | .max_keysize = 3*DES_KEY_SIZE, |
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1115 | .cra_type = &crypto_ablkcipher_type, | 1101 | .cra_type = &crypto_ablkcipher_type, |
1116 | .cra_module = THIS_MODULE, | 1102 | .cra_module = THIS_MODULE, |
1117 | .cra_init = atmel_tdes_cra_init, | 1103 | .cra_init = atmel_tdes_cra_init, |
1118 | .cra_exit = atmel_tdes_cra_exit, | ||
1119 | .cra_u.ablkcipher = { | 1104 | .cra_u.ablkcipher = { |
1120 | .min_keysize = 2*DES_KEY_SIZE, | 1105 | .min_keysize = 2*DES_KEY_SIZE, |
1121 | .max_keysize = 2*DES_KEY_SIZE, | 1106 | .max_keysize = 2*DES_KEY_SIZE, |
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1136 | .cra_type = &crypto_ablkcipher_type, | 1121 | .cra_type = &crypto_ablkcipher_type, |
1137 | .cra_module = THIS_MODULE, | 1122 | .cra_module = THIS_MODULE, |
1138 | .cra_init = atmel_tdes_cra_init, | 1123 | .cra_init = atmel_tdes_cra_init, |
1139 | .cra_exit = atmel_tdes_cra_exit, | ||
1140 | .cra_u.ablkcipher = { | 1124 | .cra_u.ablkcipher = { |
1141 | .min_keysize = 2*DES_KEY_SIZE, | 1125 | .min_keysize = 2*DES_KEY_SIZE, |
1142 | .max_keysize = 2*DES_KEY_SIZE, | 1126 | .max_keysize = 2*DES_KEY_SIZE, |
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1157 | .cra_type = &crypto_ablkcipher_type, | 1141 | .cra_type = &crypto_ablkcipher_type, |
1158 | .cra_module = THIS_MODULE, | 1142 | .cra_module = THIS_MODULE, |
1159 | .cra_init = atmel_tdes_cra_init, | 1143 | .cra_init = atmel_tdes_cra_init, |
1160 | .cra_exit = atmel_tdes_cra_exit, | ||
1161 | .cra_u.ablkcipher = { | 1144 | .cra_u.ablkcipher = { |
1162 | .min_keysize = 2*DES_KEY_SIZE, | 1145 | .min_keysize = 2*DES_KEY_SIZE, |
1163 | .max_keysize = 2*DES_KEY_SIZE, | 1146 | .max_keysize = 2*DES_KEY_SIZE, |
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1178 | .cra_type = &crypto_ablkcipher_type, | 1161 | .cra_type = &crypto_ablkcipher_type, |
1179 | .cra_module = THIS_MODULE, | 1162 | .cra_module = THIS_MODULE, |
1180 | .cra_init = atmel_tdes_cra_init, | 1163 | .cra_init = atmel_tdes_cra_init, |
1181 | .cra_exit = atmel_tdes_cra_exit, | ||
1182 | .cra_u.ablkcipher = { | 1164 | .cra_u.ablkcipher = { |
1183 | .min_keysize = 2*DES_KEY_SIZE, | 1165 | .min_keysize = 2*DES_KEY_SIZE, |
1184 | .max_keysize = 2*DES_KEY_SIZE, | 1166 | .max_keysize = 2*DES_KEY_SIZE, |
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = { | |||
1199 | .cra_type = &crypto_ablkcipher_type, | 1181 | .cra_type = &crypto_ablkcipher_type, |
1200 | .cra_module = THIS_MODULE, | 1182 | .cra_module = THIS_MODULE, |
1201 | .cra_init = atmel_tdes_cra_init, | 1183 | .cra_init = atmel_tdes_cra_init, |
1202 | .cra_exit = atmel_tdes_cra_exit, | ||
1203 | .cra_u.ablkcipher = { | 1184 | .cra_u.ablkcipher = { |
1204 | .min_keysize = 2*DES_KEY_SIZE, | 1185 | .min_keysize = 2*DES_KEY_SIZE, |
1205 | .max_keysize = 3*DES_KEY_SIZE, | 1186 | .max_keysize = 3*DES_KEY_SIZE, |
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev) | |||
1382 | 1363 | ||
1383 | crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); | 1364 | crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); |
1384 | 1365 | ||
1385 | tdes_dd->irq = -1; | ||
1386 | |||
1387 | /* Get the base address */ | 1366 | /* Get the base address */ |
1388 | tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1367 | tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1389 | if (!tdes_res) { | 1368 | if (!tdes_res) { |
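The long run of dropped .cra_exit lines above all pointed at the same empty stub: atmel_tdes_cra_exit() had an empty body, and the crypto core only invokes cra_exit when it is non-NULL, so the callback can simply be left unset. A sketch of one template entry after the cleanup (field values are illustrative, not the full driver table):

	static struct crypto_alg example_alg = {
		.cra_name	 = "ecb(des)",
		.cra_driver_name = "example-ecb-des",
		.cra_module	 = THIS_MODULE,
		.cra_init	 = example_cra_init,	/* assumed init helper */
		/* no .cra_exit: a NULL callback means nothing to tear down */
	};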
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 8685c7e4debd..ce70b44d0fb6 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg, | |||
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | static int mailbox_send_message(struct brcm_message *mssg, u32 flags, | ||
260 | u8 chan_idx) | ||
261 | { | ||
262 | int err; | ||
263 | int retry_cnt = 0; | ||
264 | struct device *dev = &(iproc_priv.pdev->dev); | ||
265 | |||
266 | err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); | ||
267 | if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { | ||
268 | while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { | ||
269 | /* | ||
270 | * Mailbox queue is full. Since MAY_SLEEP is set, assume | ||
271 | * not in atomic context and we can wait and try again. | ||
272 | */ | ||
273 | retry_cnt++; | ||
274 | usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); | ||
275 | err = mbox_send_message(iproc_priv.mbox[chan_idx], | ||
276 | mssg); | ||
277 | atomic_inc(&iproc_priv.mb_no_spc); | ||
278 | } | ||
279 | } | ||
280 | if (err < 0) { | ||
281 | atomic_inc(&iproc_priv.mb_send_fail); | ||
282 | return err; | ||
283 | } | ||
284 | |||
285 | /* Check error returned by mailbox controller */ | ||
286 | err = mssg->error; | ||
287 | if (unlikely(err < 0)) { | ||
288 | dev_err(dev, "message error %d", err); | ||
290 | } | ||
291 | |||
292 | /* Signal txdone for mailbox channel */ | ||
293 | mbox_client_txdone(iproc_priv.mbox[chan_idx], err); | ||
294 | return err; | ||
295 | } | ||
296 | |||
259 | /** | 297 | /** |
260 | * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in | 298 | * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in |
261 | * a single SPU request message, starting at the current position in the request | 299 | * a single SPU request message, starting at the current position in the request |
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) | |||
293 | u32 pad_len; /* total length of all padding */ | 331 | u32 pad_len; /* total length of all padding */ |
294 | bool update_key = false; | 332 | bool update_key = false; |
295 | struct brcm_message *mssg; /* mailbox message */ | 333 | struct brcm_message *mssg; /* mailbox message */ |
296 | int retry_cnt = 0; | ||
297 | 334 | ||
298 | /* number of entries in src and dst sg in mailbox message. */ | 335 | /* number of entries in src and dst sg in mailbox message. */ |
299 | u8 rx_frag_num = 2; /* response header and STATUS */ | 336 | u8 rx_frag_num = 2; /* response header and STATUS */ |
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) | |||
462 | if (err) | 499 | if (err) |
463 | return err; | 500 | return err; |
464 | 501 | ||
465 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); | 502 | err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); |
466 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { | 503 | if (unlikely(err < 0)) |
467 | while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { | ||
468 | /* | ||
469 | * Mailbox queue is full. Since MAY_SLEEP is set, assume | ||
470 | * not in atomic context and we can wait and try again. | ||
471 | */ | ||
472 | retry_cnt++; | ||
473 | usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); | ||
474 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], | ||
475 | mssg); | ||
476 | atomic_inc(&iproc_priv.mb_no_spc); | ||
477 | } | ||
478 | } | ||
479 | if (unlikely(err < 0)) { | ||
480 | atomic_inc(&iproc_priv.mb_send_fail); | ||
481 | return err; | 504 | return err; |
482 | } | ||
483 | 505 | ||
484 | return -EINPROGRESS; | 506 | return -EINPROGRESS; |
485 | } | 507 | } |
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) | |||
710 | u32 spu_hdr_len; | 732 | u32 spu_hdr_len; |
711 | unsigned int digestsize; | 733 | unsigned int digestsize; |
712 | u16 rem = 0; | 734 | u16 rem = 0; |
713 | int retry_cnt = 0; | ||
714 | 735 | ||
715 | /* | 736 | /* |
716 | * number of entries in src and dst sg. Always includes SPU msg header. | 737 | * number of entries in src and dst sg. Always includes SPU msg header. |
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) | |||
904 | if (err) | 925 | if (err) |
905 | return err; | 926 | return err; |
906 | 927 | ||
907 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); | 928 | err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); |
908 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { | 929 | if (unlikely(err < 0)) |
909 | while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { | ||
910 | /* | ||
911 | * Mailbox queue is full. Since MAY_SLEEP is set, assume | ||
912 | * not in atomic context and we can wait and try again. | ||
913 | */ | ||
914 | retry_cnt++; | ||
915 | usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); | ||
916 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], | ||
917 | mssg); | ||
918 | atomic_inc(&iproc_priv.mb_no_spc); | ||
919 | } | ||
920 | } | ||
921 | if (err < 0) { | ||
922 | atomic_inc(&iproc_priv.mb_send_fail); | ||
923 | return err; | 930 | return err; |
924 | } | 931 | |
925 | return -EINPROGRESS; | 932 | return -EINPROGRESS; |
926 | } | 933 | } |
927 | 934 | ||
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) | |||
1320 | int assoc_nents = 0; | 1327 | int assoc_nents = 0; |
1321 | bool incl_icv = false; | 1328 | bool incl_icv = false; |
1322 | unsigned int digestsize = ctx->digestsize; | 1329 | unsigned int digestsize = ctx->digestsize; |
1323 | int retry_cnt = 0; | ||
1324 | 1330 | ||
1325 | /* number of entries in src and dst sg. Always includes SPU msg header. | 1331 | /* number of entries in src and dst sg. Always includes SPU msg header. |
1326 | */ | 1332 | */ |
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) | |||
1367 | * expects AAD to include just SPI and seqno. So | 1373 | * expects AAD to include just SPI and seqno. So |
1368 | * subtract off the IV len. | 1374 | * subtract off the IV len. |
1369 | */ | 1375 | */ |
1370 | aead_parms.assoc_size -= GCM_ESP_IV_SIZE; | 1376 | aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE; |
1371 | 1377 | ||
1372 | if (rctx->is_encrypt) { | 1378 | if (rctx->is_encrypt) { |
1373 | aead_parms.return_iv = true; | 1379 | aead_parms.return_iv = true; |
1374 | aead_parms.ret_iv_len = GCM_ESP_IV_SIZE; | 1380 | aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE; |
1375 | aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE; | 1381 | aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE; |
1376 | } | 1382 | } |
1377 | } else { | 1383 | } else { |
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) | |||
1558 | if (err) | 1564 | if (err) |
1559 | return err; | 1565 | return err; |
1560 | 1566 | ||
1561 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); | 1567 | err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); |
1562 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { | 1568 | if (unlikely(err < 0)) |
1563 | while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { | ||
1564 | /* | ||
1565 | * Mailbox queue is full. Since MAY_SLEEP is set, assume | ||
1566 | * not in atomic context and we can wait and try again. | ||
1567 | */ | ||
1568 | retry_cnt++; | ||
1569 | usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); | ||
1570 | err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], | ||
1571 | mssg); | ||
1572 | atomic_inc(&iproc_priv.mb_no_spc); | ||
1573 | } | ||
1574 | } | ||
1575 | if (err < 0) { | ||
1576 | atomic_inc(&iproc_priv.mb_send_fail); | ||
1577 | return err; | 1569 | return err; |
1578 | } | ||
1579 | 1570 | ||
1580 | return -EINPROGRESS; | 1571 | return -EINPROGRESS; |
1581 | } | 1572 | } |
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = { | |||
3255 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | 3246 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3256 | }, | 3247 | }, |
3257 | .setkey = aead_gcm_esp_setkey, | 3248 | .setkey = aead_gcm_esp_setkey, |
3258 | .ivsize = GCM_ESP_IV_SIZE, | 3249 | .ivsize = GCM_RFC4106_IV_SIZE, |
3259 | .maxauthsize = AES_BLOCK_SIZE, | 3250 | .maxauthsize = AES_BLOCK_SIZE, |
3260 | }, | 3251 | }, |
3261 | .cipher_info = { | 3252 | .cipher_info = { |
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = { | |||
3301 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | 3292 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3302 | }, | 3293 | }, |
3303 | .setkey = rfc4543_gcm_esp_setkey, | 3294 | .setkey = rfc4543_gcm_esp_setkey, |
3304 | .ivsize = GCM_ESP_IV_SIZE, | 3295 | .ivsize = GCM_RFC4106_IV_SIZE, |
3305 | .maxauthsize = AES_BLOCK_SIZE, | 3296 | .maxauthsize = AES_BLOCK_SIZE, |
3306 | }, | 3297 | }, |
3307 | .cipher_info = { | 3298 | .cipher_info = { |
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev) | |||
4537 | mcl->dev = dev; | 4528 | mcl->dev = dev; |
4538 | mcl->tx_block = false; | 4529 | mcl->tx_block = false; |
4539 | mcl->tx_tout = 0; | 4530 | mcl->tx_tout = 0; |
4540 | mcl->knows_txdone = false; | 4531 | mcl->knows_txdone = true; |
4541 | mcl->rx_callback = spu_rx_callback; | 4532 | mcl->rx_callback = spu_rx_callback; |
4542 | mcl->tx_done = NULL; | 4533 | mcl->tx_done = NULL; |
4543 | 4534 | ||
@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev) | |||
4818 | struct device *dev = &pdev->dev; | 4809 | struct device *dev = &pdev->dev; |
4819 | struct spu_hw *spu = &iproc_priv.spu; | 4810 | struct spu_hw *spu = &iproc_priv.spu; |
4820 | struct resource *spu_ctrl_regs; | 4811 | struct resource *spu_ctrl_regs; |
4821 | const struct of_device_id *match; | ||
4822 | const struct spu_type_subtype *matched_spu_type; | 4812 | const struct spu_type_subtype *matched_spu_type; |
4823 | struct device_node *dn = pdev->dev.of_node; | 4813 | struct device_node *dn = pdev->dev.of_node; |
4824 | int err, i; | 4814 | int err, i; |
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev) | |||
4826 | /* Count number of mailbox channels */ | 4816 | /* Count number of mailbox channels */ |
4827 | spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); | 4817 | spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); |
4828 | 4818 | ||
4829 | match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); | 4819 | matched_spu_type = of_device_get_match_data(dev); |
4830 | if (!match) { | 4820 | if (!matched_spu_type) { |
4831 | dev_err(&pdev->dev, "Failed to match device\n"); | 4821 | dev_err(&pdev->dev, "Failed to match device\n"); |
4832 | return -ENODEV; | 4822 | return -ENODEV; |
4833 | } | 4823 | } |
4834 | 4824 | ||
4835 | matched_spu_type = match->data; | ||
4836 | |||
4837 | spu->spu_type = matched_spu_type->type; | 4825 | spu->spu_type = matched_spu_type->type; |
4838 | spu->spu_subtype = matched_spu_type->subtype; | 4826 | spu->spu_subtype = matched_spu_type->subtype; |
4839 | 4827 | ||
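The three near-identical retry loops in the ablkcipher, ahash and aead paths collapse into the new mailbox_send_message() helper, which also signals txdone itself; that is why spu_mb_init() now registers the mailbox client with knows_txdone = true. A sketch of a submit path after the refactor, reusing the request context names from this file:

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;		/* send failed; mb_send_fail was bumped */

	return -EINPROGRESS;		/* response arrives via spu_rx_callback() */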
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 57a55eb2a255..763c425c41ca 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
24 | #include <crypto/internal/hash.h> | 24 | #include <crypto/internal/hash.h> |
25 | #include <crypto/aead.h> | 25 | #include <crypto/aead.h> |
26 | #include <crypto/gcm.h> | ||
26 | #include <crypto/sha.h> | 27 | #include <crypto/sha.h> |
27 | #include <crypto/sha3.h> | 28 | #include <crypto/sha3.h> |
28 | 29 | ||
@@ -39,8 +40,6 @@ | |||
39 | #define ARC4_STATE_SIZE 4 | 40 | #define ARC4_STATE_SIZE 4 |
40 | 41 | ||
41 | #define CCM_AES_IV_SIZE 16 | 42 | #define CCM_AES_IV_SIZE 16 |
42 | #define GCM_AES_IV_SIZE 12 | ||
43 | #define GCM_ESP_IV_SIZE 8 | ||
44 | #define CCM_ESP_IV_SIZE 8 | 43 | #define CCM_ESP_IV_SIZE 8 |
45 | #define RFC4543_ICV_SIZE 16 | 44 | #define RFC4543_ICV_SIZE 16 |
46 | 45 | ||
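The driver-local IV defines give way to the shared <crypto/gcm.h> header introduced earlier in this series, so every driver now agrees on the same sizes:

	#include <crypto/gcm.h>

	/* GCM_AES_IV_SIZE     == 12: full 96-bit GCM nonce                  */
	/* GCM_RFC4106_IV_SIZE ==  8: ESP explicit IV; the 4-byte salt stays */
	/*                            with the key                           */
	/* GCM_RFC4543_IV_SIZE ==  8: same layout for the GMAC variant       */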
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index 430c5570ea87..d543c010ccd9 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c | |||
@@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result, | |||
271 | hash = crypto_alloc_shash(name, 0, 0); | 271 | hash = crypto_alloc_shash(name, 0, 0); |
272 | if (IS_ERR(hash)) { | 272 | if (IS_ERR(hash)) { |
273 | rc = PTR_ERR(hash); | 273 | rc = PTR_ERR(hash); |
274 | pr_err("%s: Crypto %s allocation error %d", __func__, name, rc); | 274 | pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc); |
275 | return rc; | 275 | return rc; |
276 | } | 276 | } |
277 | 277 | ||
@@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result, | |||
279 | sdesc = kmalloc(size, GFP_KERNEL); | 279 | sdesc = kmalloc(size, GFP_KERNEL); |
280 | if (!sdesc) { | 280 | if (!sdesc) { |
281 | rc = -ENOMEM; | 281 | rc = -ENOMEM; |
282 | pr_err("%s: Memory allocation failure", __func__); | 282 | pr_err("%s: Memory allocation failure\n", __func__); |
283 | goto do_shash_err; | 283 | goto do_shash_err; |
284 | } | 284 | } |
285 | sdesc->shash.tfm = hash; | 285 | sdesc->shash.tfm = hash; |
@@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result, | |||
288 | if (key_len > 0) { | 288 | if (key_len > 0) { |
289 | rc = crypto_shash_setkey(hash, key, key_len); | 289 | rc = crypto_shash_setkey(hash, key, key_len); |
290 | if (rc) { | 290 | if (rc) { |
291 | pr_err("%s: Could not setkey %s shash", __func__, name); | 291 | pr_err("%s: Could not setkey %s shash\n", __func__, name); |
292 | goto do_shash_err; | 292 | goto do_shash_err; |
293 | } | 293 | } |
294 | } | 294 | } |
295 | 295 | ||
296 | rc = crypto_shash_init(&sdesc->shash); | 296 | rc = crypto_shash_init(&sdesc->shash); |
297 | if (rc) { | 297 | if (rc) { |
298 | pr_err("%s: Could not init %s shash", __func__, name); | 298 | pr_err("%s: Could not init %s shash\n", __func__, name); |
299 | goto do_shash_err; | 299 | goto do_shash_err; |
300 | } | 300 | } |
301 | rc = crypto_shash_update(&sdesc->shash, data1, data1_len); | 301 | rc = crypto_shash_update(&sdesc->shash, data1, data1_len); |
302 | if (rc) { | 302 | if (rc) { |
303 | pr_err("%s: Could not update1", __func__); | 303 | pr_err("%s: Could not update1\n", __func__); |
304 | goto do_shash_err; | 304 | goto do_shash_err; |
305 | } | 305 | } |
306 | if (data2 && data2_len) { | 306 | if (data2 && data2_len) { |
307 | rc = crypto_shash_update(&sdesc->shash, data2, data2_len); | 307 | rc = crypto_shash_update(&sdesc->shash, data2, data2_len); |
308 | if (rc) { | 308 | if (rc) { |
309 | pr_err("%s: Could not update2", __func__); | 309 | pr_err("%s: Could not update2\n", __func__); |
310 | goto do_shash_err; | 310 | goto do_shash_err; |
311 | } | 311 | } |
312 | } | 312 | } |
313 | rc = crypto_shash_final(&sdesc->shash, result); | 313 | rc = crypto_shash_final(&sdesc->shash, result); |
314 | if (rc) | 314 | if (rc) |
315 | pr_err("%s: Could not generate %s hash", __func__, name); | 315 | pr_err("%s: Could not generate %s hash\n", __func__, name); |
316 | 316 | ||
317 | do_shash_err: | 317 | do_shash_err: |
318 | crypto_free_shash(hash); | 318 | crypto_free_shash(hash); |
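All the do_shash() messages gain a trailing \n; without it the kernel log may fold the next, unrelated message into the same line as a continuation. For example:

	pr_err("%s: Could not init %s shash\n", __func__, name);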
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 54f3b375a453..baa8dd52472d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -992,7 +992,7 @@ static void init_gcm_job(struct aead_request *req, | |||
992 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 992 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
993 | unsigned int ivsize = crypto_aead_ivsize(aead); | 993 | unsigned int ivsize = crypto_aead_ivsize(aead); |
994 | u32 *desc = edesc->hw_desc; | 994 | u32 *desc = edesc->hw_desc; |
995 | bool generic_gcm = (ivsize == 12); | 995 | bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); |
996 | unsigned int last; | 996 | unsigned int last; |
997 | 997 | ||
998 | init_aead_job(req, edesc, all_contig, encrypt); | 998 | init_aead_job(req, edesc, all_contig, encrypt); |
@@ -1004,7 +1004,7 @@ static void init_gcm_job(struct aead_request *req, | |||
1004 | 1004 | ||
1005 | /* Read GCM IV */ | 1005 | /* Read GCM IV */ |
1006 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | 1006 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | |
1007 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); | 1007 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); |
1008 | /* Append Salt */ | 1008 | /* Append Salt */ |
1009 | if (!generic_gcm) | 1009 | if (!generic_gcm) |
1010 | append_data(desc, ctx->key + ctx->cdata.keylen, 4); | 1010 | append_data(desc, ctx->key + ctx->cdata.keylen, 4); |
@@ -1953,7 +1953,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
1953 | .setauthsize = rfc4106_setauthsize, | 1953 | .setauthsize = rfc4106_setauthsize, |
1954 | .encrypt = ipsec_gcm_encrypt, | 1954 | .encrypt = ipsec_gcm_encrypt, |
1955 | .decrypt = ipsec_gcm_decrypt, | 1955 | .decrypt = ipsec_gcm_decrypt, |
1956 | .ivsize = 8, | 1956 | .ivsize = GCM_RFC4106_IV_SIZE, |
1957 | .maxauthsize = AES_BLOCK_SIZE, | 1957 | .maxauthsize = AES_BLOCK_SIZE, |
1958 | }, | 1958 | }, |
1959 | .caam = { | 1959 | .caam = { |
@@ -1971,7 +1971,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
1971 | .setauthsize = rfc4543_setauthsize, | 1971 | .setauthsize = rfc4543_setauthsize, |
1972 | .encrypt = ipsec_gcm_encrypt, | 1972 | .encrypt = ipsec_gcm_encrypt, |
1973 | .decrypt = ipsec_gcm_decrypt, | 1973 | .decrypt = ipsec_gcm_decrypt, |
1974 | .ivsize = 8, | 1974 | .ivsize = GCM_RFC4543_IV_SIZE, |
1975 | .maxauthsize = AES_BLOCK_SIZE, | 1975 | .maxauthsize = AES_BLOCK_SIZE, |
1976 | }, | 1976 | }, |
1977 | .caam = { | 1977 | .caam = { |
@@ -1990,7 +1990,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
1990 | .setauthsize = gcm_setauthsize, | 1990 | .setauthsize = gcm_setauthsize, |
1991 | .encrypt = gcm_encrypt, | 1991 | .encrypt = gcm_encrypt, |
1992 | .decrypt = gcm_decrypt, | 1992 | .decrypt = gcm_decrypt, |
1993 | .ivsize = 12, | 1993 | .ivsize = GCM_AES_IV_SIZE, |
1994 | .maxauthsize = AES_BLOCK_SIZE, | 1994 | .maxauthsize = AES_BLOCK_SIZE, |
1995 | }, | 1995 | }, |
1996 | .caam = { | 1996 | .caam = { |
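The caamalg hunks make the RFC4106/RFC4543 sizing explicit: a request carries only the 8-byte explicit IV, and the 4-byte salt that setkey stored behind the key material completes the 12-byte GCM nonce (hence the append_data(desc, ctx->key + ctx->cdata.keylen, 4) above). A sketch of the assembly, with nonce as an illustrative local:

	u8 nonce[GCM_AES_IV_SIZE];

	memcpy(nonce, ctx->key + ctx->cdata.keylen, 4);   /* salt stored by setkey   */
	memcpy(nonce + 4, req->iv, GCM_RFC4106_IV_SIZE);  /* per-request explicit IV */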
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 2eefc4a26bc2..f9f08fce4356 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -7,7 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include "compat.h" | 9 | #include "compat.h" |
10 | 10 | #include "ctrl.h" | |
11 | #include "regs.h" | 11 | #include "regs.h" |
12 | #include "intern.h" | 12 | #include "intern.h" |
13 | #include "desc_constr.h" | 13 | #include "desc_constr.h" |
@@ -2312,6 +2312,11 @@ static int __init caam_qi_algapi_init(void) | |||
2312 | if (!priv || !priv->qi_present) | 2312 | if (!priv || !priv->qi_present) |
2313 | return -ENODEV; | 2313 | return -ENODEV; |
2314 | 2314 | ||
2315 | if (caam_dpaa2) { | ||
2316 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); | ||
2317 | return -ENODEV; | ||
2318 | } | ||
2319 | |||
2315 | INIT_LIST_HEAD(&alg_list); | 2320 | INIT_LIST_HEAD(&alg_list); |
2316 | 2321 | ||
2317 | /* | 2322 | /* |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 698580b60b2f..616720a04e7a 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -218,7 +218,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev, | |||
218 | } | 218 | } |
219 | 219 | ||
220 | /* Map state->caam_ctx, and add it to link table */ | 220 | /* Map state->caam_ctx, and add it to link table */ |
221 | static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, | 221 | static inline int ctx_map_to_sec4_sg(struct device *jrdev, |
222 | struct caam_hash_state *state, int ctx_len, | 222 | struct caam_hash_state *state, int ctx_len, |
223 | struct sec4_sg_entry *sec4_sg, u32 flag) | 223 | struct sec4_sg_entry *sec4_sg, u32 flag) |
224 | { | 224 | { |
@@ -773,7 +773,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
773 | edesc->src_nents = src_nents; | 773 | edesc->src_nents = src_nents; |
774 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 774 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
775 | 775 | ||
776 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 776 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
777 | edesc->sec4_sg, DMA_BIDIRECTIONAL); | 777 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
778 | if (ret) | 778 | if (ret) |
779 | goto unmap_ctx; | 779 | goto unmap_ctx; |
@@ -871,9 +871,8 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
871 | desc = edesc->hw_desc; | 871 | desc = edesc->hw_desc; |
872 | 872 | ||
873 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 873 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
874 | edesc->src_nents = 0; | ||
875 | 874 | ||
876 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 875 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
877 | edesc->sec4_sg, DMA_TO_DEVICE); | 876 | edesc->sec4_sg, DMA_TO_DEVICE); |
878 | if (ret) | 877 | if (ret) |
879 | goto unmap_ctx; | 878 | goto unmap_ctx; |
@@ -967,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
967 | 966 | ||
968 | edesc->src_nents = src_nents; | 967 | edesc->src_nents = src_nents; |
969 | 968 | ||
970 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 969 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
971 | edesc->sec4_sg, DMA_TO_DEVICE); | 970 | edesc->sec4_sg, DMA_TO_DEVICE); |
972 | if (ret) | 971 | if (ret) |
973 | goto unmap_ctx; | 972 | goto unmap_ctx; |
@@ -1123,7 +1122,6 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1123 | dev_err(jrdev, "unable to map dst\n"); | 1122 | dev_err(jrdev, "unable to map dst\n"); |
1124 | goto unmap; | 1123 | goto unmap; |
1125 | } | 1124 | } |
1126 | edesc->src_nents = 0; | ||
1127 | 1125 | ||
1128 | #ifdef DEBUG | 1126 | #ifdef DEBUG |
1129 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1127 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1205,7 +1203,6 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1205 | 1203 | ||
1206 | edesc->src_nents = src_nents; | 1204 | edesc->src_nents = src_nents; |
1207 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1205 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1208 | edesc->dst_dma = 0; | ||
1209 | 1206 | ||
1210 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); | 1207 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); |
1211 | if (ret) | 1208 | if (ret) |
@@ -1417,7 +1414,6 @@ static int ahash_update_first(struct ahash_request *req) | |||
1417 | } | 1414 | } |
1418 | 1415 | ||
1419 | edesc->src_nents = src_nents; | 1416 | edesc->src_nents = src_nents; |
1420 | edesc->dst_dma = 0; | ||
1421 | 1417 | ||
1422 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, | 1418 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
1423 | to_hash); | 1419 | to_hash); |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 5f2f1b288d37..1c71e0cd5098 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <crypto/aes.h> | 32 | #include <crypto/aes.h> |
33 | #include <crypto/ctr.h> | 33 | #include <crypto/ctr.h> |
34 | #include <crypto/des.h> | 34 | #include <crypto/des.h> |
35 | #include <crypto/gcm.h> | ||
35 | #include <crypto/sha.h> | 36 | #include <crypto/sha.h> |
36 | #include <crypto/md5.h> | 37 | #include <crypto/md5.h> |
37 | #include <crypto/internal/aead.h> | 38 | #include <crypto/internal/aead.h> |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 6633fbb80e74..8142de7ba050 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -1440,7 +1440,7 @@ | |||
1440 | #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) | 1440 | #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) |
1441 | #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) | 1441 | #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) |
1442 | #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) | 1442 | #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) |
1443 | #define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT) | 1443 | #define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT) |
1444 | #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) | 1444 | #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) |
1445 | #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) | 1445 | #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) |
1446 | #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) | 1446 | #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) |
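The one-character desc.h change is a real fix, not churn: the old define shifted the SRC1 DPOVRD selector by the SRC0 field's offset, so the value landed in the neighbouring bit-field and SRC1 itself was left encoding register 0. In effect:

	/* old: selector lands in the SRC0 field, SRC1 stays zero */
	op |= (0x07 << MATH_SRC0_SHIFT);

	/* new: selector lands in the SRC1 field it names */
	op |= (0x07 << MATH_SRC1_SHIFT);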
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c index bf9900bc4804..ab4ccf2f9e77 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.c +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c | |||
@@ -127,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) | |||
127 | * size and interrupt threshold. | 127 | * size and interrupt threshold. |
128 | */ | 128 | */ |
129 | offset = NPS_PKT_IN_INSTR_BADDRX(i); | 129 | offset = NPS_PKT_IN_INSTR_BADDRX(i); |
130 | nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma); | 130 | nitrox_write_csr(ndev, offset, cmdq->dma); |
131 | 131 | ||
132 | /* configure ring size */ | 132 | /* configure ring size */ |
133 | offset = NPS_PKT_IN_INSTR_RSIZEX(i); | 133 | offset = NPS_PKT_IN_INSTR_RSIZEX(i); |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 52313524a4dd..ff02b713c6f6 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c | |||
@@ -19,13 +19,12 @@ | |||
19 | #include <crypto/algapi.h> | 19 | #include <crypto/algapi.h> |
20 | #include <crypto/aes.h> | 20 | #include <crypto/aes.h> |
21 | #include <crypto/ctr.h> | 21 | #include <crypto/ctr.h> |
22 | #include <crypto/gcm.h> | ||
22 | #include <crypto/scatterwalk.h> | 23 | #include <crypto/scatterwalk.h> |
23 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
24 | 25 | ||
25 | #include "ccp-crypto.h" | 26 | #include "ccp-crypto.h" |
26 | 27 | ||
27 | #define AES_GCM_IVSIZE 12 | ||
28 | |||
29 | static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) | 28 | static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) |
30 | { | 29 | { |
31 | return ret; | 30 | return ret; |
@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) | |||
95 | */ | 94 | */ |
96 | 95 | ||
97 | /* Prepare the IV: 12 bytes + an integer (counter) */ | 96 | /* Prepare the IV: 12 bytes + an integer (counter) */ |
98 | memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE); | 97 | memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); |
99 | for (i = 0; i < 3; i++) | 98 | for (i = 0; i < 3; i++) |
100 | rctx->iv[i + AES_GCM_IVSIZE] = 0; | 99 | rctx->iv[i + GCM_AES_IV_SIZE] = 0; |
101 | rctx->iv[AES_BLOCK_SIZE - 1] = 1; | 100 | rctx->iv[AES_BLOCK_SIZE - 1] = 1; |
102 | 101 | ||
103 | /* Set up a scatterlist for the IV */ | 102 | /* Set up a scatterlist for the IV */ |
@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = { | |||
160 | .encrypt = ccp_aes_gcm_encrypt, | 159 | .encrypt = ccp_aes_gcm_encrypt, |
161 | .decrypt = ccp_aes_gcm_decrypt, | 160 | .decrypt = ccp_aes_gcm_decrypt, |
162 | .init = ccp_aes_gcm_cra_init, | 161 | .init = ccp_aes_gcm_cra_init, |
163 | .ivsize = AES_GCM_IVSIZE, | 162 | .ivsize = GCM_AES_IV_SIZE, |
164 | .maxauthsize = AES_BLOCK_SIZE, | 163 | .maxauthsize = AES_BLOCK_SIZE, |
165 | .base = { | 164 | .base = { |
166 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 165 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
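The ccp conversion keeps the same IV construction, just via the shared constant: the 96-bit nonce is padded out to the 16-byte counter block (GCM's J0), with the trailing 32-bit big-endian counter set to 1. Spelled out, following the driver code above:

	u8 iv[AES_BLOCK_SIZE];

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);	/* bytes 0..11: nonce         */
	iv[12] = iv[13] = iv[14] = 0;		/* bytes 12..14: counter high */
	iv[AES_BLOCK_SIZE - 1] = 1;		/* byte 15: counter = 1       */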
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 35a9de7fd475..b95d19974aa6 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c | |||
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) | |||
222 | 222 | ||
223 | /* Check if the cmd can/should be queued */ | 223 | /* Check if the cmd can/should be queued */ |
224 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { | 224 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
225 | ret = -EBUSY; | 225 | if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { |
226 | if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) | 226 | ret = -ENOSPC; |
227 | goto e_lock; | 227 | goto e_lock; |
228 | } | ||
228 | } | 229 | } |
229 | 230 | ||
230 | /* Look for an entry with the same tfm. If there is a cmd | 231 | /* Look for an entry with the same tfm. If there is a cmd |
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) | |||
243 | ret = ccp_enqueue_cmd(crypto_cmd->cmd); | 244 | ret = ccp_enqueue_cmd(crypto_cmd->cmd); |
244 | if (!ccp_crypto_success(ret)) | 245 | if (!ccp_crypto_success(ret)) |
245 | goto e_lock; /* Error, don't queue it */ | 246 | goto e_lock; /* Error, don't queue it */ |
246 | if ((ret == -EBUSY) && | ||
247 | !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) | ||
248 | goto e_lock; /* Not backlogging, don't queue it */ | ||
249 | } | 247 | } |
250 | 248 | ||
251 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { | 249 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 65604fc65e8f..44a4d2779b15 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp) | |||
788 | struct ccp_cmd_queue *cmd_q; | 788 | struct ccp_cmd_queue *cmd_q; |
789 | struct dma_pool *dma_pool; | 789 | struct dma_pool *dma_pool; |
790 | char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; | 790 | char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; |
791 | unsigned int qmr, qim, i; | 791 | unsigned int qmr, i; |
792 | u64 status; | 792 | u64 status; |
793 | u32 status_lo, status_hi; | 793 | u32 status_lo, status_hi; |
794 | int ret; | 794 | int ret; |
795 | 795 | ||
796 | /* Find available queues */ | 796 | /* Find available queues */ |
797 | qim = 0; | ||
798 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); | 797 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); |
799 | for (i = 0; i < MAX_HW_QUEUES; i++) { | 798 | for (i = 0; i < MAX_HW_QUEUES; i++) { |
800 | 799 | ||
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 4e029b176641..1b5035d56288 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd) | |||
292 | i = ccp->cmd_q_count; | 292 | i = ccp->cmd_q_count; |
293 | 293 | ||
294 | if (ccp->cmd_count >= MAX_CMD_QLEN) { | 294 | if (ccp->cmd_count >= MAX_CMD_QLEN) { |
295 | ret = -EBUSY; | 295 | if (cmd->flags & CCP_CMD_MAY_BACKLOG) { |
296 | if (cmd->flags & CCP_CMD_MAY_BACKLOG) | 296 | ret = -EBUSY; |
297 | list_add_tail(&cmd->entry, &ccp->backlog); | 297 | list_add_tail(&cmd->entry, &ccp->backlog); |
298 | } else { | ||
299 | ret = -ENOSPC; | ||
300 | } | ||
298 | } else { | 301 | } else { |
299 | ret = -EINPROGRESS; | 302 | ret = -EINPROGRESS; |
300 | ccp->cmd_count++; | 303 | ccp->cmd_count++; |
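The two ccp hunks above are this driver's side of the EBUSY/ENOSPC disambiguation from the pull summary: -EBUSY now always means "accepted and backlogged, a completion will follow", while -ENOSPC means "queue full and backlog not allowed, nothing was queued". A sketch of what a caller can now rely on:

	ret = ccp_enqueue_cmd(cmd);
	switch (ret) {
	case -EINPROGRESS:	/* queued; wait for the async callback      */
	case -EBUSY:		/* backlogged; callback is still guaranteed */
		break;
	case -ENOSPC:		/* not queued; back off and resubmit        */
		break;
	default:
		/* hard error; the command was rejected */
		break;
	}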
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index d608043c0280..8b9da58459df 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c | |||
@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan, | |||
223 | desc->tx_desc.cookie, desc->status); | 223 | desc->tx_desc.cookie, desc->status); |
224 | 224 | ||
225 | dma_cookie_complete(tx_desc); | 225 | dma_cookie_complete(tx_desc); |
226 | dma_descriptor_unmap(tx_desc); | ||
226 | } | 227 | } |
227 | 228 | ||
228 | desc = __ccp_next_dma_desc(chan, desc); | 229 | desc = __ccp_next_dma_desc(chan, desc); |
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan, | |||
230 | spin_unlock_irqrestore(&chan->lock, flags); | 231 | spin_unlock_irqrestore(&chan->lock, flags); |
231 | 232 | ||
232 | if (tx_desc) { | 233 | if (tx_desc) { |
233 | if (tx_desc->callback && | 234 | dmaengine_desc_get_callback_invoke(tx_desc, NULL); |
234 | (tx_desc->flags & DMA_PREP_INTERRUPT)) | ||
235 | tx_desc->callback(tx_desc->callback_param); | ||
236 | 235 | ||
237 | dma_run_dependencies(tx_desc); | 236 | dma_run_dependencies(tx_desc); |
238 | } | 237 | } |
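The dmaengine cleanup swaps the open-coded callback check for the core helper and adds the missing dma_descriptor_unmap() before completion. dmaengine_desc_get_callback_invoke() is shorthand for the two steps below (see drivers/dma/dmaengine.h), and it copes with both the legacy callback and the newer callback_result variant:

	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx_desc, &cb);	/* snapshot the callback   */
	dmaengine_desc_callback_invoke(&cb, NULL);	/* invoke it if one is set */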
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 0e8160701833..4eed7171e2ae 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <crypto/aes.h> | 53 | #include <crypto/aes.h> |
54 | #include <crypto/algapi.h> | 54 | #include <crypto/algapi.h> |
55 | #include <crypto/hash.h> | 55 | #include <crypto/hash.h> |
56 | #include <crypto/gcm.h> | ||
56 | #include <crypto/sha.h> | 57 | #include <crypto/sha.h> |
57 | #include <crypto/authenc.h> | 58 | #include <crypto/authenc.h> |
58 | #include <crypto/ctr.h> | 59 | #include <crypto/ctr.h> |
@@ -70,6 +71,8 @@ | |||
70 | #include "chcr_algo.h" | 71 | #include "chcr_algo.h" |
71 | #include "chcr_crypto.h" | 72 | #include "chcr_crypto.h" |
72 | 73 | ||
74 | #define IV AES_BLOCK_SIZE | ||
75 | |||
73 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) | 76 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
74 | { | 77 | { |
75 | return ctx->crypto_ctx->aeadctx; | 78 | return ctx->crypto_ctx->aeadctx; |
@@ -102,7 +105,7 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) | |||
102 | 105 | ||
103 | static inline int is_ofld_imm(const struct sk_buff *skb) | 106 | static inline int is_ofld_imm(const struct sk_buff *skb) |
104 | { | 107 | { |
105 | return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN); | 108 | return (skb->len <= SGE_MAX_WR_LEN); |
106 | } | 109 | } |
107 | 110 | ||
108 | /* | 111 | /* |
@@ -117,6 +120,92 @@ static inline unsigned int sgl_len(unsigned int n) | |||
117 | return (3 * n) / 2 + (n & 1) + 2; | 120 | return (3 * n) / 2 + (n & 1) + 2; |
118 | } | 121 | } |
119 | 122 | ||
123 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, | ||
124 | unsigned int entlen, | ||
125 | unsigned int skip) | ||
126 | { | ||
127 | int nents = 0; | ||
128 | unsigned int less; | ||
129 | unsigned int skip_len = 0; | ||
130 | |||
131 | while (sg && skip) { | ||
132 | if (sg_dma_len(sg) <= skip) { | ||
133 | skip -= sg_dma_len(sg); | ||
134 | skip_len = 0; | ||
135 | sg = sg_next(sg); | ||
136 | } else { | ||
137 | skip_len = skip; | ||
138 | skip = 0; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | while (sg && reqlen) { | ||
143 | less = min(reqlen, sg_dma_len(sg) - skip_len); | ||
144 | nents += DIV_ROUND_UP(less, entlen); | ||
145 | reqlen -= less; | ||
146 | skip_len = 0; | ||
147 | sg = sg_next(sg); | ||
148 | } | ||
149 | return nents; | ||
150 | } | ||
151 | |||
152 | static inline void chcr_handle_ahash_resp(struct ahash_request *req, | ||
153 | unsigned char *input, | ||
154 | int err) | ||
155 | { | ||
156 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | ||
157 | int digestsize, updated_digestsize; | ||
158 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
159 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); | ||
160 | |||
161 | if (input == NULL) | ||
162 | goto out; | ||
163 | reqctx = ahash_request_ctx(req); | ||
164 | digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); | ||
165 | if (reqctx->is_sg_map) | ||
166 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | ||
167 | if (reqctx->dma_addr) | ||
168 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr, | ||
169 | reqctx->dma_len, DMA_TO_DEVICE); | ||
170 | reqctx->dma_addr = 0; | ||
171 | updated_digestsize = digestsize; | ||
172 | if (digestsize == SHA224_DIGEST_SIZE) | ||
173 | updated_digestsize = SHA256_DIGEST_SIZE; | ||
174 | else if (digestsize == SHA384_DIGEST_SIZE) | ||
175 | updated_digestsize = SHA512_DIGEST_SIZE; | ||
176 | if (reqctx->result == 1) { | ||
177 | reqctx->result = 0; | ||
178 | memcpy(req->result, input + sizeof(struct cpl_fw6_pld), | ||
179 | digestsize); | ||
180 | } else { | ||
181 | memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), | ||
182 | updated_digestsize); | ||
183 | } | ||
184 | out: | ||
185 | req->base.complete(&req->base, err); | ||
186 | |||
187 | } | ||
188 | |||
189 | static inline void chcr_handle_aead_resp(struct aead_request *req, | ||
190 | unsigned char *input, | ||
191 | int err) | ||
192 | { | ||
193 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
194 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
195 | struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); | ||
196 | |||
197 | |||
198 | chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); | ||
199 | if (reqctx->b0_dma) | ||
200 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, | ||
201 | reqctx->b0_len, DMA_BIDIRECTIONAL); | ||
202 | if (reqctx->verify == VERIFY_SW) { | ||
203 | chcr_verify_tag(req, input, &err); | ||
204 | reqctx->verify = VERIFY_HW; | ||
205 | } | ||
206 | req->base.complete(&req->base, err); | ||
207 | |||
208 | } | ||
120 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) | 209 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
121 | { | 210 | { |
122 | u8 temp[SHA512_DIGEST_SIZE]; | 211 | u8 temp[SHA512_DIGEST_SIZE]; |
@@ -151,29 +240,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
151 | { | 240 | { |
152 | struct crypto_tfm *tfm = req->tfm; | 241 | struct crypto_tfm *tfm = req->tfm; |
153 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | 242 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
154 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
155 | struct chcr_req_ctx ctx_req; | ||
156 | unsigned int digestsize, updated_digestsize; | ||
157 | struct adapter *adap = padap(ctx->dev); | 243 | struct adapter *adap = padap(ctx->dev); |
158 | 244 | ||
159 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 245 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
160 | case CRYPTO_ALG_TYPE_AEAD: | 246 | case CRYPTO_ALG_TYPE_AEAD: |
161 | ctx_req.req.aead_req = aead_request_cast(req); | 247 | chcr_handle_aead_resp(aead_request_cast(req), input, err); |
162 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); | ||
163 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, | ||
164 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); | ||
165 | if (ctx_req.ctx.reqctx->skb) { | ||
166 | kfree_skb(ctx_req.ctx.reqctx->skb); | ||
167 | ctx_req.ctx.reqctx->skb = NULL; | ||
168 | } | ||
169 | free_new_sg(ctx_req.ctx.reqctx->newdstsg); | ||
170 | ctx_req.ctx.reqctx->newdstsg = NULL; | ||
171 | if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { | ||
172 | chcr_verify_tag(ctx_req.req.aead_req, input, | ||
173 | &err); | ||
174 | ctx_req.ctx.reqctx->verify = VERIFY_HW; | ||
175 | } | ||
176 | ctx_req.req.aead_req->base.complete(req, err); | ||
177 | break; | 248 | break; |
178 | 249 | ||
179 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 250 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
@@ -182,60 +253,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
182 | break; | 253 | break; |
183 | 254 | ||
184 | case CRYPTO_ALG_TYPE_AHASH: | 255 | case CRYPTO_ALG_TYPE_AHASH: |
185 | ctx_req.req.ahash_req = ahash_request_cast(req); | 256 | chcr_handle_ahash_resp(ahash_request_cast(req), input, err); |
186 | ctx_req.ctx.ahash_ctx = | ||
187 | ahash_request_ctx(ctx_req.req.ahash_req); | ||
188 | digestsize = | ||
189 | crypto_ahash_digestsize(crypto_ahash_reqtfm( | ||
190 | ctx_req.req.ahash_req)); | ||
191 | updated_digestsize = digestsize; | ||
192 | if (digestsize == SHA224_DIGEST_SIZE) | ||
193 | updated_digestsize = SHA256_DIGEST_SIZE; | ||
194 | else if (digestsize == SHA384_DIGEST_SIZE) | ||
195 | updated_digestsize = SHA512_DIGEST_SIZE; | ||
196 | if (ctx_req.ctx.ahash_ctx->skb) { | ||
197 | kfree_skb(ctx_req.ctx.ahash_ctx->skb); | ||
198 | ctx_req.ctx.ahash_ctx->skb = NULL; | ||
199 | } | ||
200 | if (ctx_req.ctx.ahash_ctx->result == 1) { | ||
201 | ctx_req.ctx.ahash_ctx->result = 0; | ||
202 | memcpy(ctx_req.req.ahash_req->result, input + | ||
203 | sizeof(struct cpl_fw6_pld), | ||
204 | digestsize); | ||
205 | } else { | ||
206 | memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input + | ||
207 | sizeof(struct cpl_fw6_pld), | ||
208 | updated_digestsize); | ||
209 | } | 257 | } |
210 | ctx_req.req.ahash_req->base.complete(req, err); | ||
211 | break; | ||
212 | } | ||
213 | atomic_inc(&adap->chcr_stats.complete); | 258 | atomic_inc(&adap->chcr_stats.complete); |
214 | return err; | 259 | return err; |
215 | } | 260 | } |
216 | 261 | ||
217 | /* | 262 | static void get_aes_decrypt_key(unsigned char *dec_key, |
218 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | ||
219 | * @skb: the packet | ||
220 | * Returns the number of flits needed for the given offload packet. | ||
221 | * These packets are already fully constructed and no additional headers | ||
222 | * will be added. | ||
223 | */ | ||
224 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | ||
225 | { | ||
226 | unsigned int flits, cnt; | ||
227 | |||
228 | if (is_ofld_imm(skb)) | ||
229 | return DIV_ROUND_UP(skb->len, 8); | ||
230 | |||
231 | flits = skb_transport_offset(skb) / 8; /* headers */ | ||
232 | cnt = skb_shinfo(skb)->nr_frags; | ||
233 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) | ||
234 | cnt++; | ||
235 | return flits + sgl_len(cnt); | ||
236 | } | ||
237 | |||
238 | static inline void get_aes_decrypt_key(unsigned char *dec_key, | ||
239 | const unsigned char *key, | 263 | const unsigned char *key, |
240 | unsigned int keylength) | 264 | unsigned int keylength) |
241 | { | 265 | { |
@@ -382,13 +406,19 @@ static inline int is_hmac(struct crypto_tfm *tfm) | |||
382 | return 0; | 406 | return 0; |
383 | } | 407 | } |
384 | 408 | ||
385 | static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | 409 | static inline void dsgl_walk_init(struct dsgl_walk *walk, |
386 | struct scatterlist *sg, | 410 | struct cpl_rx_phys_dsgl *dsgl) |
387 | struct phys_sge_parm *sg_param) | ||
388 | { | 411 | { |
389 | struct phys_sge_pairs *to; | 412 | walk->dsgl = dsgl; |
390 | unsigned int len = 0, left_size = sg_param->obsize; | 413 | walk->nents = 0; |
391 | unsigned int nents = sg_param->nents, i, j = 0; | 414 | walk->to = (struct phys_sge_pairs *)(dsgl + 1); |
415 | } | ||
416 | |||
417 | static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid) | ||
418 | { | ||
419 | struct cpl_rx_phys_dsgl *phys_cpl; | ||
420 | |||
421 | phys_cpl = walk->dsgl; | ||
392 | 422 | ||
393 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | 423 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) |
394 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | 424 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); |
@@ -398,38 +428,171 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | |||
398 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | | 428 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | |
399 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | | 429 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | |
400 | CPL_RX_PHYS_DSGL_DCAID_V(0) | | 430 | CPL_RX_PHYS_DSGL_DCAID_V(0) | |
401 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); | 431 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); |
402 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; | 432 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; |
403 | phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); | 433 | phys_cpl->rss_hdr_int.qid = htons(qid); |
404 | phys_cpl->rss_hdr_int.hash_val = 0; | 434 | phys_cpl->rss_hdr_int.hash_val = 0; |
405 | to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + | 435 | } |
406 | sizeof(struct cpl_rx_phys_dsgl)); | 436 | |
407 | for (i = 0; nents && left_size; to++) { | 437 | static inline void dsgl_walk_add_page(struct dsgl_walk *walk, |
408 | for (j = 0; j < 8 && nents && left_size; j++, nents--) { | 438 | size_t size, |
409 | len = min(left_size, sg_dma_len(sg)); | 439 | dma_addr_t *addr) |
410 | to->len[j] = htons(len); | 440 | { |
411 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); | 441 | int j; |
412 | left_size -= len; | 442 | |
443 | if (!size) | ||
444 | return; | ||
445 | j = walk->nents; | ||
446 | walk->to->len[j % 8] = htons(size); | ||
447 | walk->to->addr[j % 8] = cpu_to_be64(*addr); | ||
448 | j++; | ||
449 | if ((j % 8) == 0) | ||
450 | walk->to++; | ||
451 | walk->nents = j; | ||
452 | } | ||
453 | |||
454 | static void dsgl_walk_add_sg(struct dsgl_walk *walk, | ||
455 | struct scatterlist *sg, | ||
456 | unsigned int slen, | ||
457 | unsigned int skip) | ||
458 | { | ||
459 | int skip_len = 0; | ||
460 | unsigned int left_size = slen, len = 0; | ||
461 | unsigned int j = walk->nents; | ||
462 | int offset, ent_len; | ||
463 | |||
464 | if (!slen) | ||
465 | return; | ||
466 | while (sg && skip) { | ||
467 | if (sg_dma_len(sg) <= skip) { | ||
468 | skip -= sg_dma_len(sg); | ||
469 | skip_len = 0; | ||
413 | sg = sg_next(sg); | 470 | sg = sg_next(sg); |
471 | } else { | ||
472 | skip_len = skip; | ||
473 | skip = 0; | ||
414 | } | 474 | } |
415 | } | 475 | } |
476 | |||
477 | while (left_size && sg) { | ||
478 | len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); | ||
479 | offset = 0; | ||
480 | while (len) { | ||
481 | ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); | ||
482 | walk->to->len[j % 8] = htons(ent_len); | ||
483 | walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + | ||
484 | offset + skip_len); | ||
485 | offset += ent_len; | ||
486 | len -= ent_len; | ||
487 | j++; | ||
488 | if ((j % 8) == 0) | ||
489 | walk->to++; | ||
490 | } | ||
491 | walk->last_sg = sg; | ||
492 | walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - | ||
493 | skip_len) + skip_len; | ||
494 | left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); | ||
495 | skip_len = 0; | ||
496 | sg = sg_next(sg); | ||
497 | } | ||
498 | walk->nents = j; | ||
499 | } | ||
500 | |||
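The dsgl walk above amounts to filling eight address/length pairs per phys_sge_pairs block and stepping to the next block when one fills up; dsgl_walk_end() then patches the final entry count into the CPL header. A toy model of just that j % 8 bookkeeping, with invented struct and variable names:

    #include <stdio.h>

    struct pairs { unsigned short len[8]; unsigned long long addr[8]; };

    int main(void)
    {
            struct pairs blocks[3] = { 0 };
            struct pairs *to = blocks;      /* plays walk->to */
            unsigned int nents = 0;         /* plays walk->nents */

            for (int i = 0; i < 11; i++) {  /* add eleven 64-byte entries */
                    unsigned int j = nents % 8;

                    to->len[j] = 64;
                    to->addr[j] = 0x10000ULL + 64ULL * i;
                    if (++nents % 8 == 0)
                            to++;           /* block full, move to next */
            }
            printf("nents=%u, blocks used=%td\n", nents, to - blocks + 1);
            return 0;
    }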
501 | static inline void ulptx_walk_init(struct ulptx_walk *walk, | ||
502 | struct ulptx_sgl *ulp) | ||
503 | { | ||
504 | walk->sgl = ulp; | ||
505 | walk->nents = 0; | ||
506 | walk->pair_idx = 0; | ||
507 | walk->pair = ulp->sge; | ||
508 | walk->last_sg = NULL; | ||
509 | walk->last_sg_len = 0; | ||
416 | } | 510 | } |
417 | 511 | ||
418 | static inline int map_writesg_phys_cpl(struct device *dev, | 512 | static inline void ulptx_walk_end(struct ulptx_walk *walk) |
419 | struct cpl_rx_phys_dsgl *phys_cpl, | 513 | { |
514 | walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | | ||
515 | ULPTX_NSGE_V(walk->nents)); | ||
516 | } | ||
517 | |||
518 | |||
519 | static inline void ulptx_walk_add_page(struct ulptx_walk *walk, | ||
520 | size_t size, | ||
521 | dma_addr_t *addr) | ||
522 | { | ||
523 | if (!size) | ||
524 | return; | ||
525 | |||
526 | if (walk->nents == 0) { | ||
527 | walk->sgl->len0 = cpu_to_be32(size); | ||
528 | walk->sgl->addr0 = cpu_to_be64(*addr); | ||
529 | } else { | ||
530 | walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr); | ||
531 | walk->pair->len[walk->pair_idx] = cpu_to_be32(size); | ||
532 | walk->pair_idx = !walk->pair_idx; | ||
533 | if (!walk->pair_idx) | ||
534 | walk->pair++; | ||
535 | } | ||
536 | walk->nents++; | ||
537 | } | ||
538 | |||
539 | static void ulptx_walk_add_sg(struct ulptx_walk *walk, | ||
420 | struct scatterlist *sg, | 540 | struct scatterlist *sg, |
421 | struct phys_sge_parm *sg_param) | 541 | unsigned int len, |
542 | unsigned int skip) | ||
422 | { | 543 | { |
423 | if (!sg || !sg_param->nents) | 544 | int small; |
424 | return -EINVAL; | 545 | int skip_len = 0; |
546 | unsigned int sgmin; | ||
425 | 547 | ||
426 | sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); | 548 | if (!len) |
427 | if (sg_param->nents == 0) { | 549 | return; |
428 | pr_err("CHCR : DMA mapping failed\n"); | 550 | |
429 | return -EINVAL; | 551 | while (sg && skip) { |
552 | if (sg_dma_len(sg) <= skip) { | ||
553 | skip -= sg_dma_len(sg); | ||
554 | skip_len = 0; | ||
555 | sg = sg_next(sg); | ||
556 | } else { | ||
557 | skip_len = skip; | ||
558 | skip = 0; | ||
559 | } | ||
560 | } | ||
561 | if (walk->nents == 0) { | ||
562 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); | ||
563 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | ||
564 | walk->sgl->len0 = cpu_to_be32(sgmin); | ||
565 | walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); | ||
566 | walk->nents++; | ||
567 | len -= sgmin; | ||
568 | walk->last_sg = sg; | ||
569 | walk->last_sg_len = sgmin + skip_len; | ||
570 | skip_len += sgmin; | ||
571 | if (sg_dma_len(sg) == skip_len) { | ||
572 | sg = sg_next(sg); | ||
573 | skip_len = 0; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | while (sg && len) { | ||
578 | small = min(sg_dma_len(sg) - skip_len, len); | ||
579 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | ||
580 | walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); | ||
581 | walk->pair->addr[walk->pair_idx] = | ||
582 | cpu_to_be64(sg_dma_address(sg) + skip_len); | ||
583 | walk->pair_idx = !walk->pair_idx; | ||
584 | walk->nents++; | ||
585 | if (!walk->pair_idx) | ||
586 | walk->pair++; | ||
587 | len -= sgmin; | ||
588 | skip_len += sgmin; | ||
589 | walk->last_sg = sg; | ||
590 | walk->last_sg_len = skip_len; | ||
591 | if (sg_dma_len(sg) == skip_len) { | ||
592 | sg = sg_next(sg); | ||
593 | skip_len = 0; | ||
594 | } | ||
430 | } | 595 | } |
431 | write_phys_cpl(phys_cpl, sg, sg_param); | ||
432 | return 0; | ||
433 | } | 596 | } |
434 | 597 | ||
435 | static inline int get_aead_subtype(struct crypto_aead *aead) | 598 | static inline int get_aead_subtype(struct crypto_aead *aead) |
@@ -449,45 +612,6 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | |||
449 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | 612 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; |
450 | } | 613 | } |
451 | 614 | ||
452 | static inline void write_buffer_to_skb(struct sk_buff *skb, | ||
453 | unsigned int *frags, | ||
454 | char *bfr, | ||
455 | u8 bfr_len) | ||
456 | { | ||
457 | skb->len += bfr_len; | ||
458 | skb->data_len += bfr_len; | ||
459 | skb->truesize += bfr_len; | ||
460 | get_page(virt_to_page(bfr)); | ||
461 | skb_fill_page_desc(skb, *frags, virt_to_page(bfr), | ||
462 | offset_in_page(bfr), bfr_len); | ||
463 | (*frags)++; | ||
464 | } | ||
465 | |||
466 | |||
467 | static inline void | ||
468 | write_sg_to_skb(struct sk_buff *skb, unsigned int *frags, | ||
469 | struct scatterlist *sg, unsigned int count) | ||
470 | { | ||
471 | struct page *spage; | ||
472 | unsigned int page_len; | ||
473 | |||
474 | skb->len += count; | ||
475 | skb->data_len += count; | ||
476 | skb->truesize += count; | ||
477 | |||
478 | while (count > 0) { | ||
479 | if (!sg || (!(sg->length))) | ||
480 | break; | ||
481 | spage = sg_page(sg); | ||
482 | get_page(spage); | ||
483 | page_len = min(sg->length, count); | ||
484 | skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len); | ||
485 | (*frags)++; | ||
486 | count -= page_len; | ||
487 | sg = sg_next(sg); | ||
488 | } | ||
489 | } | ||
490 | |||
491 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) | 615 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) |
492 | { | 616 | { |
493 | struct adapter *adap = netdev2adap(dev); | 617 | struct adapter *adap = netdev2adap(dev); |
@@ -524,30 +648,46 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, | |||
524 | struct scatterlist *dst, | 648 | struct scatterlist *dst, |
525 | unsigned int minsg, | 649 | unsigned int minsg, |
526 | unsigned int space, | 650 | unsigned int space, |
527 | short int *sent, | 651 | unsigned int srcskip, |
528 | short int *dent) | 652 | unsigned int dstskip) |
529 | { | 653 | { |
530 | int srclen = 0, dstlen = 0; | 654 | int srclen = 0, dstlen = 0; |
531 | int srcsg = minsg, dstsg = 0; | 655 | int srcsg = minsg, dstsg = minsg; |
656 | int offset = 0, less; | ||
657 | |||
658 | if (sg_dma_len(src) == srcskip) { | ||
659 | src = sg_next(src); | ||
660 | srcskip = 0; | ||
661 | } | ||
532 | 662 | ||
533 | *sent = 0; | 663 | if (sg_dma_len(dst) == dstskip) { |
534 | *dent = 0; | 664 | dst = sg_next(dst); |
535 | while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) && | 665 | dstskip = 0; |
666 | } | ||
667 | |||
668 | while (src && dst && | ||
536 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { | 669 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { |
537 | srclen += src->length; | 670 | srclen += (sg_dma_len(src) - srcskip); |
538 | srcsg++; | 671 | srcsg++; |
672 | offset = 0; | ||
539 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && | 673 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && |
540 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { | 674 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { |
541 | if (srclen <= dstlen) | 675 | if (srclen <= dstlen) |
542 | break; | 676 | break; |
543 | dstlen += dst->length; | 677 | less = min_t(unsigned int, sg_dma_len(dst) - offset - |
544 | dst = sg_next(dst); | 678 | dstskip, CHCR_DST_SG_SIZE); |
679 | dstlen += less; | ||
680 | offset += less; | ||
681 | if (offset == sg_dma_len(dst)) { | ||
682 | dst = sg_next(dst); | ||
683 | offset = 0; | ||
684 | } | ||
545 | dstsg++; | 685 | dstsg++; |
686 | dstskip = 0; | ||
546 | } | 687 | } |
547 | src = sg_next(src); | 688 | src = sg_next(src); |
689 | srcskip = 0; | ||
548 | } | 690 | } |
549 | *sent = srcsg - minsg; | ||
550 | *dent = dstsg; | ||
551 | return min(srclen, dstlen); | 691 | return min(srclen, dstlen); |
552 | } | 692 | } |
553 | 693 | ||
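chcr_sg_ent_in_wr() is essentially a greedy fit: grow the source entry count, and let the destination count chase it, for as long as the combined worst-case footprint of both gather lists stays inside the available WR space; the transferable payload is then min(srclen, dstlen). A simplified model, with invented cost functions standing in for the driver's sgl_ent_len[] and dsgl_ent_len[] lookup tables:

    #include <stdio.h>

    /* Hypothetical per-entry-count footprints; the driver looks these
     * up in precomputed sgl_ent_len[] / dsgl_ent_len[] tables. */
    static unsigned int src_cost(unsigned int n) { return 16 + 12 * n; }
    static unsigned int dst_cost(unsigned int n) { return 32 + 16 * n; }

    int main(void)
    {
            unsigned int space = 256, srcsg = 0, dstsg = 0;
            unsigned int srclen = 0, dstlen = 0;

            while (src_cost(srcsg + 1) + dst_cost(dstsg) <= space) {
                    srclen += 4096;         /* pretend 4K per src entry */
                    srcsg++;
                    while (dstlen < srclen &&
                           src_cost(srcsg) + dst_cost(dstsg + 1) <= space) {
                            dstlen += 4096;
                            dstsg++;
                    }
            }
            printf("WR can carry %u bytes\n",
                   srclen < dstlen ? srclen : dstlen);
            return 0;
    }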
@@ -576,47 +716,35 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher, | |||
576 | } | 716 | } |
577 | static inline void create_wreq(struct chcr_context *ctx, | 717 | static inline void create_wreq(struct chcr_context *ctx, |
578 | struct chcr_wr *chcr_req, | 718 | struct chcr_wr *chcr_req, |
579 | void *req, struct sk_buff *skb, | 719 | struct crypto_async_request *req, |
580 | int kctx_len, int hash_sz, | 720 | unsigned int imm, |
581 | int is_iv, | 721 | int hash_sz, |
722 | unsigned int len16, | ||
582 | unsigned int sc_len, | 723 | unsigned int sc_len, |
583 | unsigned int lcb) | 724 | unsigned int lcb) |
584 | { | 725 | { |
585 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 726 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
586 | int iv_loc = IV_DSGL; | ||
587 | int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; | 727 | int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; |
588 | unsigned int immdatalen = 0, nr_frags = 0; | ||
589 | 728 | ||
590 | if (is_ofld_imm(skb)) { | ||
591 | immdatalen = skb->data_len; | ||
592 | iv_loc = IV_IMMEDIATE; | ||
593 | } else { | ||
594 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
595 | } | ||
596 | 729 | ||
597 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, | 730 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; |
598 | ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4)); | ||
599 | chcr_req->wreq.pld_size_hash_size = | 731 | chcr_req->wreq.pld_size_hash_size = |
600 | htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | | 732 | htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); |
601 | FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); | ||
602 | chcr_req->wreq.len16_pkd = | 733 | chcr_req->wreq.len16_pkd = |
603 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( | 734 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); |
604 | (calc_tx_flits_ofld(skb) * 8), 16))); | ||
605 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); | 735 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
606 | chcr_req->wreq.rx_chid_to_rx_q_id = | 736 | chcr_req->wreq.rx_chid_to_rx_q_id = |
607 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, | 737 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, |
608 | is_iv ? iv_loc : IV_NOP, !!lcb, | 738 | !!lcb, ctx->tx_qidx); |
609 | ctx->tx_qidx); | ||
610 | 739 | ||
611 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, | 740 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, |
612 | qid); | 741 | qid); |
613 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), | 742 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - |
614 | 16) - ((sizeof(chcr_req->wreq)) >> 4))); | 743 | ((sizeof(chcr_req->wreq)) >> 4))); |
615 | 744 | ||
616 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); | 745 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); |
617 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + | 746 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + |
618 | sizeof(chcr_req->key_ctx) + | 747 | sizeof(chcr_req->key_ctx) + sc_len); |
619 | kctx_len + sc_len + immdatalen); | ||
620 | } | 748 | } |
621 | 749 | ||
622 | /** | 750 | /** |
@@ -629,47 +757,52 @@ static inline void create_wreq(struct chcr_context *ctx, | |||
629 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) | 757 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) |
630 | { | 758 | { |
631 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); | 759 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); |
632 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 760 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
633 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
634 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
635 | struct sk_buff *skb = NULL; | 761 | struct sk_buff *skb = NULL; |
636 | struct chcr_wr *chcr_req; | 762 | struct chcr_wr *chcr_req; |
637 | struct cpl_rx_phys_dsgl *phys_cpl; | 763 | struct cpl_rx_phys_dsgl *phys_cpl; |
764 | struct ulptx_sgl *ulptx; | ||
638 | struct chcr_blkcipher_req_ctx *reqctx = | 765 | struct chcr_blkcipher_req_ctx *reqctx = |
639 | ablkcipher_request_ctx(wrparam->req); | 766 | ablkcipher_request_ctx(wrparam->req); |
640 | struct phys_sge_parm sg_param; | 767 | unsigned int temp = 0, transhdr_len, dst_size; |
641 | unsigned int frags = 0, transhdr_len, phys_dsgl; | ||
642 | int error; | 768 | int error; |
643 | unsigned int ivsize = AES_BLOCK_SIZE, kctx_len; | 769 | int nents; |
770 | unsigned int kctx_len; | ||
644 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | 771 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
645 | GFP_KERNEL : GFP_ATOMIC; | 772 | GFP_KERNEL : GFP_ATOMIC; |
646 | struct adapter *adap = padap(ctx->dev); | 773 | struct adapter *adap = padap(c_ctx(tfm)->dev); |
647 | |||
648 | phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); | ||
649 | 774 | ||
775 | nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, | ||
776 | reqctx->dst_ofst); | ||
777 | dst_size = get_space_for_phys_dsgl(nents + 1); | ||
650 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); | 778 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); |
651 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); | 779 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
652 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 780 | nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, |
781 | CHCR_SRC_SG_SIZE, reqctx->src_ofst); | ||
782 | temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16) | ||
783 | * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8); | ||
784 | transhdr_len += temp; | ||
785 | transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; | ||
786 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); | ||
653 | if (!skb) { | 787 | if (!skb) { |
654 | error = -ENOMEM; | 788 | error = -ENOMEM; |
655 | goto err; | 789 | goto err; |
656 | } | 790 | } |
657 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
658 | chcr_req = __skb_put_zero(skb, transhdr_len); | 791 | chcr_req = __skb_put_zero(skb, transhdr_len); |
659 | chcr_req->sec_cpl.op_ivinsrtofst = | 792 | chcr_req->sec_cpl.op_ivinsrtofst = |
660 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); | 793 | FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1); |
661 | 794 | ||
662 | chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes); | 795 | chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); |
663 | chcr_req->sec_cpl.aadstart_cipherstop_hi = | 796 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
664 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); | 797 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); |
665 | 798 | ||
666 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | 799 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
667 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | 800 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); |
668 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, | 801 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, |
669 | ablkctx->ciph_mode, | 802 | ablkctx->ciph_mode, |
670 | 0, 0, ivsize >> 1); | 803 | 0, 0, IV >> 1); |
671 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, | 804 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
672 | 0, 1, phys_dsgl); | 805 | 0, 0, dst_size); |
673 | 806 | ||
674 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; | 807 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
675 | if ((reqctx->op == CHCR_DECRYPT_OP) && | 808 | if ((reqctx->op == CHCR_DECRYPT_OP) && |
@@ -694,26 +827,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) | |||
694 | } | 827 | } |
695 | } | 828 | } |
696 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 829 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
697 | sg_param.nents = reqctx->dst_nents; | 830 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
698 | sg_param.obsize = wrparam->bytes; | 831 | chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); |
699 | sg_param.qid = wrparam->qid; | 832 | chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); |
700 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, | ||
701 | reqctx->dst, &sg_param); | ||
702 | if (error) | ||
703 | goto map_fail1; | ||
704 | 833 | ||
705 | skb_set_transport_header(skb, transhdr_len); | ||
706 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | ||
707 | write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes); | ||
708 | atomic_inc(&adap->chcr_stats.cipher_rqst); | 834 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
709 | create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1, | 835 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len |
710 | sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl, | 836 | +(reqctx->imm ? (IV + wrparam->bytes) : 0); |
837 | create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, | ||
838 | transhdr_len, temp, | ||
711 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); | 839 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); |
712 | reqctx->skb = skb; | 840 | reqctx->skb = skb; |
713 | skb_get(skb); | ||
714 | return skb; | 841 | return skb; |
715 | map_fail1: | ||
716 | kfree_skb(skb); | ||
717 | err: | 842 | err: |
718 | return ERR_PTR(error); | 843 | return ERR_PTR(error); |
719 | } | 844 | } |
@@ -738,8 +863,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher, | |||
738 | unsigned int keylen) | 863 | unsigned int keylen) |
739 | { | 864 | { |
740 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 865 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
741 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | 866 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
742 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
743 | int err = 0; | 867 | int err = 0; |
744 | 868 | ||
745 | crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | 869 | crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
@@ -757,8 +881,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher, | |||
757 | const u8 *key, | 881 | const u8 *key, |
758 | unsigned int keylen) | 882 | unsigned int keylen) |
759 | { | 883 | { |
760 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | 884 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
761 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
762 | unsigned int ck_size, context_size; | 885 | unsigned int ck_size, context_size; |
763 | u16 alignment = 0; | 886 | u16 alignment = 0; |
764 | int err; | 887 | int err; |
@@ -790,8 +913,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher, | |||
790 | const u8 *key, | 913 | const u8 *key, |
791 | unsigned int keylen) | 914 | unsigned int keylen) |
792 | { | 915 | { |
793 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | 916 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
794 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
795 | unsigned int ck_size, context_size; | 917 | unsigned int ck_size, context_size; |
796 | u16 alignment = 0; | 918 | u16 alignment = 0; |
797 | int err; | 919 | int err; |
@@ -822,8 +944,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher, | |||
822 | const u8 *key, | 944 | const u8 *key, |
823 | unsigned int keylen) | 945 | unsigned int keylen) |
824 | { | 946 | { |
825 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | 947 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
826 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
827 | unsigned int ck_size, context_size; | 948 | unsigned int ck_size, context_size; |
828 | u16 alignment = 0; | 949 | u16 alignment = 0; |
829 | int err; | 950 | int err; |
@@ -890,25 +1011,28 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) | |||
890 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) | 1011 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) |
891 | { | 1012 | { |
892 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1013 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
893 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1014 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
894 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
895 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 1015 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
896 | struct crypto_cipher *cipher; | 1016 | struct crypto_cipher *cipher; |
897 | int ret, i; | 1017 | int ret, i; |
898 | u8 *key; | 1018 | u8 *key; |
899 | unsigned int keylen; | 1019 | unsigned int keylen; |
1020 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; | ||
1021 | int round8 = round / 8; | ||
900 | 1022 | ||
901 | cipher = ablkctx->aes_generic; | 1023 | cipher = ablkctx->aes_generic; |
902 | memcpy(iv, req->info, AES_BLOCK_SIZE); | 1024 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
903 | 1025 | ||
904 | keylen = ablkctx->enckey_len / 2; | 1026 | keylen = ablkctx->enckey_len / 2; |
905 | key = ablkctx->key + keylen; | 1027 | key = ablkctx->key + keylen; |
906 | ret = crypto_cipher_setkey(cipher, key, keylen); | 1028 | ret = crypto_cipher_setkey(cipher, key, keylen); |
907 | if (ret) | 1029 | if (ret) |
908 | goto out; | 1030 | goto out; |
1031 | /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */ | ||
1032 | for (i = 0; i < round8; i++) | ||
1033 | gf128mul_x8_ble((le128 *)iv, (le128 *)iv); | ||
909 | 1034 | ||
910 | crypto_cipher_encrypt_one(cipher, iv, iv); | 1035 | for (i = 0; i < (round % 8); i++) |
911 | for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++) | ||
912 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); | 1036 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); |
913 | 1037 | ||
914 | crypto_cipher_decrypt_one(cipher, iv, iv); | 1038 | crypto_cipher_decrypt_one(cipher, iv, iv); |
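The tweak catch-up above is plain GF(2^128) arithmetic: advancing the XTS tweak by `round` blocks means multiplying it by x^round, which the code performs as round/8 bulk x^8 steps (gf128mul_x8_ble()) plus round%8 single doublings. The identity is easy to demonstrate in a toy field; here is a GF(2^8) stand-in (the driver works in GF(2^128); the modulus below is the AES polynomial, chosen only for familiarity):

    #include <stdio.h>
    #include <stdint.h>

    /* Multiply by x in GF(2^8) mod x^8 + x^4 + x^3 + x + 1. */
    static uint8_t gf_double(uint8_t a)
    {
            return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
    }

    int main(void)
    {
            int round = 29;                 /* e.g. 29 blocks processed */
            uint8_t a = 0x57, b = 0x57;

            for (int i = 0; i < round; i++) /* naive: one doubling per block */
                    a = gf_double(a);

            for (int i = 0; i < round / 8; i++)   /* bulk x^8 steps */
                    for (int k = 0; k < 8; k++)
                            b = gf_double(b);
            for (int i = 0; i < round % 8; i++)   /* leftover singles */
                    b = gf_double(b);

            printf("0x%02x == 0x%02x\n", a, b);   /* always equal */
            return 0;
    }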
@@ -982,65 +1106,60 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | |||
982 | unsigned char *input, int err) | 1106 | unsigned char *input, int err) |
983 | { | 1107 | { |
984 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1108 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
985 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1109 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
986 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 1110 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
987 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
988 | struct sk_buff *skb; | 1111 | struct sk_buff *skb; |
989 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; | 1112 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; |
990 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 1113 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
991 | struct cipher_wr_param wrparam; | 1114 | struct cipher_wr_param wrparam; |
992 | int bytes; | 1115 | int bytes; |
993 | 1116 | ||
994 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents, | ||
995 | DMA_FROM_DEVICE); | ||
996 | |||
997 | if (reqctx->skb) { | ||
998 | kfree_skb(reqctx->skb); | ||
999 | reqctx->skb = NULL; | ||
1000 | } | ||
1001 | if (err) | 1117 | if (err) |
1002 | goto complete; | 1118 | goto unmap; |
1003 | |||
1004 | if (req->nbytes == reqctx->processed) { | 1119 | if (req->nbytes == reqctx->processed) { |
1120 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | ||
1121 | req); | ||
1005 | err = chcr_final_cipher_iv(req, fw6_pld, req->info); | 1122 | err = chcr_final_cipher_iv(req, fw6_pld, req->info); |
1006 | goto complete; | 1123 | goto complete; |
1007 | } | 1124 | } |
1008 | 1125 | ||
1009 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1126 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1010 | ctx->tx_qidx))) { | 1127 | c_ctx(tfm)->tx_qidx))) { |
1011 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { | 1128 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
1012 | err = -EBUSY; | 1129 | err = -EBUSY; |
1013 | goto complete; | 1130 | goto unmap; |
1014 | } | 1131 | } |
1015 | 1132 | ||
1016 | } | 1133 | } |
1017 | wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src, | 1134 | if (!reqctx->imm) { |
1018 | reqctx->processed); | 1135 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, |
1019 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg, | 1136 | SPACE_LEFT(ablkctx->enckey_len), |
1020 | reqctx->processed); | 1137 | reqctx->src_ofst, reqctx->dst_ofst); |
1021 | if (!wrparam.srcsg || !reqctx->dst) { | ||
1022 | pr_err("Input sg list length less that nbytes\n"); | ||
1023 | err = -EINVAL; | ||
1024 | goto complete; | ||
1025 | } | ||
1026 | bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1, | ||
1027 | SPACE_LEFT(ablkctx->enckey_len), | ||
1028 | &wrparam.snent, &reqctx->dst_nents); | ||
1029 | if ((bytes + reqctx->processed) >= req->nbytes) | 1138 | if ((bytes + reqctx->processed) >= req->nbytes) |
1030 | bytes = req->nbytes - reqctx->processed; | 1139 | bytes = req->nbytes - reqctx->processed; |
1031 | else | 1140 | else |
1032 | bytes = ROUND_16(bytes); | 1141 | bytes = ROUND_16(bytes); |
1142 | } else { | ||
1143 | /* CTR mode counter overflow */ | ||
1144 | bytes = req->nbytes - reqctx->processed; | ||
1145 | } | ||
1146 | dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | ||
1147 | reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | ||
1033 | err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); | 1148 | err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); |
1149 | dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | ||
1150 | reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | ||
1034 | if (err) | 1151 | if (err) |
1035 | goto complete; | 1152 | goto unmap; |
1036 | 1153 | ||
1037 | if (unlikely(bytes == 0)) { | 1154 | if (unlikely(bytes == 0)) { |
1155 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | ||
1156 | req); | ||
1038 | err = chcr_cipher_fallback(ablkctx->sw_cipher, | 1157 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1039 | req->base.flags, | 1158 | req->base.flags, |
1040 | wrparam.srcsg, | 1159 | req->src, |
1041 | reqctx->dst, | 1160 | req->dst, |
1042 | req->nbytes - reqctx->processed, | 1161 | req->nbytes, |
1043 | reqctx->iv, | 1162 | req->info, |
1044 | reqctx->op); | 1163 | reqctx->op); |
1045 | goto complete; | 1164 | goto complete; |
1046 | } | 1165 | } |
@@ -1048,23 +1167,24 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | |||
1048 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | 1167 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
1049 | CRYPTO_ALG_SUB_TYPE_CTR) | 1168 | CRYPTO_ALG_SUB_TYPE_CTR) |
1050 | bytes = adjust_ctr_overflow(reqctx->iv, bytes); | 1169 | bytes = adjust_ctr_overflow(reqctx->iv, bytes); |
1051 | reqctx->processed += bytes; | 1170 | wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx]; |
1052 | wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; | ||
1053 | wrparam.req = req; | 1171 | wrparam.req = req; |
1054 | wrparam.bytes = bytes; | 1172 | wrparam.bytes = bytes; |
1055 | skb = create_cipher_wr(&wrparam); | 1173 | skb = create_cipher_wr(&wrparam); |
1056 | if (IS_ERR(skb)) { | 1174 | if (IS_ERR(skb)) { |
1057 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | 1175 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); |
1058 | err = PTR_ERR(skb); | 1176 | err = PTR_ERR(skb); |
1059 | goto complete; | 1177 | goto unmap; |
1060 | } | 1178 | } |
1061 | skb->dev = u_ctx->lldi.ports[0]; | 1179 | skb->dev = u_ctx->lldi.ports[0]; |
1062 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1180 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
1063 | chcr_send_wr(skb); | 1181 | chcr_send_wr(skb); |
1182 | reqctx->last_req_len = bytes; | ||
1183 | reqctx->processed += bytes; | ||
1064 | return 0; | 1184 | return 0; |
1185 | unmap: | ||
1186 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | ||
1065 | complete: | 1187 | complete: |
1066 | free_new_sg(reqctx->newdstsg); | ||
1067 | reqctx->newdstsg = NULL; | ||
1068 | req->base.complete(&req->base, err); | 1188 | req->base.complete(&req->base, err); |
1069 | return err; | 1189 | return err; |
1070 | } | 1190 | } |
@@ -1077,12 +1197,10 @@ static int process_cipher(struct ablkcipher_request *req, | |||
1077 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1197 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
1078 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | 1198 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); |
1079 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 1199 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
1080 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1200 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
1081 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1082 | struct cipher_wr_param wrparam; | 1201 | struct cipher_wr_param wrparam; |
1083 | int bytes, nents, err = -EINVAL; | 1202 | int bytes, err = -EINVAL; |
1084 | 1203 | ||
1085 | reqctx->newdstsg = NULL; | ||
1086 | reqctx->processed = 0; | 1204 | reqctx->processed = 0; |
1087 | if (!req->info) | 1205 | if (!req->info) |
1088 | goto error; | 1206 | goto error; |
@@ -1093,25 +1211,41 @@ static int process_cipher(struct ablkcipher_request *req, | |||
1093 | ablkctx->enckey_len, req->nbytes, ivsize); | 1211 | ablkctx->enckey_len, req->nbytes, ivsize); |
1094 | goto error; | 1212 | goto error; |
1095 | } | 1213 | } |
1096 | wrparam.srcsg = req->src; | 1214 | chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); |
1097 | if (is_newsg(req->dst, &nents)) { | 1215 | if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + |
1098 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | 1216 | AES_MIN_KEY_SIZE + |
1099 | if (IS_ERR(reqctx->newdstsg)) | 1217 | sizeof(struct cpl_rx_phys_dsgl) + |
1100 | return PTR_ERR(reqctx->newdstsg); | 1218 | /* Min dsgl size */ |
1101 | reqctx->dstsg = reqctx->newdstsg; | 1219 | 32))) { |
1220 | /* Can be sent as Imm */ | ||
1221 | unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; | ||
1222 | |||
1223 | dnents = sg_nents_xlen(req->dst, req->nbytes, | ||
1224 | CHCR_DST_SG_SIZE, 0); | ||
1225 | dnents += 1; // IV | ||
1226 | phys_dsgl = get_space_for_phys_dsgl(dnents); | ||
1227 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); | ||
1228 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); | ||
1229 | reqctx->imm = (transhdr_len + IV + req->nbytes) <= | ||
1230 | SGE_MAX_WR_LEN; | ||
1231 | bytes = IV + req->nbytes; | ||
1232 | |||
1102 | } else { | 1233 | } else { |
1103 | reqctx->dstsg = req->dst; | 1234 | reqctx->imm = 0; |
1104 | } | 1235 | } |
1105 | bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG, | 1236 | |
1106 | SPACE_LEFT(ablkctx->enckey_len), | 1237 | if (!reqctx->imm) { |
1107 | &wrparam.snent, | 1238 | bytes = chcr_sg_ent_in_wr(req->src, req->dst, |
1108 | &reqctx->dst_nents); | 1239 | MIN_CIPHER_SG, |
1240 | SPACE_LEFT(ablkctx->enckey_len), | ||
1241 | 0, 0); | ||
1109 | if ((bytes + reqctx->processed) >= req->nbytes) | 1242 | if ((bytes + reqctx->processed) >= req->nbytes) |
1110 | bytes = req->nbytes - reqctx->processed; | 1243 | bytes = req->nbytes - reqctx->processed; |
1111 | else | 1244 | else |
1112 | bytes = ROUND_16(bytes); | 1245 | bytes = ROUND_16(bytes); |
1113 | if (unlikely(bytes > req->nbytes)) | 1246 | } else { |
1114 | bytes = req->nbytes; | 1247 | bytes = req->nbytes; |
1248 | } | ||
1115 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | 1249 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
1116 | CRYPTO_ALG_SUB_TYPE_CTR) { | 1250 | CRYPTO_ALG_SUB_TYPE_CTR) { |
1117 | bytes = adjust_ctr_overflow(req->info, bytes); | 1251 | bytes = adjust_ctr_overflow(req->info, bytes); |
@@ -1128,9 +1262,11 @@ static int process_cipher(struct ablkcipher_request *req, | |||
1128 | 1262 | ||
1129 | } else { | 1263 | } else { |
1130 | 1264 | ||
1131 | memcpy(reqctx->iv, req->info, ivsize); | 1265 | memcpy(reqctx->iv, req->info, IV); |
1132 | } | 1266 | } |
1133 | if (unlikely(bytes == 0)) { | 1267 | if (unlikely(bytes == 0)) { |
1268 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, | ||
1269 | req); | ||
1134 | err = chcr_cipher_fallback(ablkctx->sw_cipher, | 1270 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1135 | req->base.flags, | 1271 | req->base.flags, |
1136 | req->src, | 1272 | req->src, |
@@ -1140,45 +1276,48 @@ static int process_cipher(struct ablkcipher_request *req, | |||
1140 | op_type); | 1276 | op_type); |
1141 | goto error; | 1277 | goto error; |
1142 | } | 1278 | } |
1143 | reqctx->processed = bytes; | ||
1144 | reqctx->dst = reqctx->dstsg; | ||
1145 | reqctx->op = op_type; | 1279 | reqctx->op = op_type; |
1280 | reqctx->srcsg = req->src; | ||
1281 | reqctx->dstsg = req->dst; | ||
1282 | reqctx->src_ofst = 0; | ||
1283 | reqctx->dst_ofst = 0; | ||
1146 | wrparam.qid = qid; | 1284 | wrparam.qid = qid; |
1147 | wrparam.req = req; | 1285 | wrparam.req = req; |
1148 | wrparam.bytes = bytes; | 1286 | wrparam.bytes = bytes; |
1149 | *skb = create_cipher_wr(&wrparam); | 1287 | *skb = create_cipher_wr(&wrparam); |
1150 | if (IS_ERR(*skb)) { | 1288 | if (IS_ERR(*skb)) { |
1151 | err = PTR_ERR(*skb); | 1289 | err = PTR_ERR(*skb); |
1152 | goto error; | 1290 | goto unmap; |
1153 | } | 1291 | } |
1292 | reqctx->processed = bytes; | ||
1293 | reqctx->last_req_len = bytes; | ||
1154 | 1294 | ||
1155 | return 0; | 1295 | return 0; |
1296 | unmap: | ||
1297 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | ||
1156 | error: | 1298 | error: |
1157 | free_new_sg(reqctx->newdstsg); | ||
1158 | reqctx->newdstsg = NULL; | ||
1159 | return err; | 1299 | return err; |
1160 | } | 1300 | } |
1161 | 1301 | ||
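The imm decision in process_cipher() boils down to one comparison: if the transfer header, IV and payload all fit in a single work request, the payload is copied inline instead of being described by a gather list. A back-of-envelope version of that test, with all sizes invented:

    #include <stdio.h>
    #include <stdbool.h>

    #define SGE_MAX_WR_LEN 512U  /* assumed max work-request size */
    #define IV 16U               /* AES block sized IV */

    static bool fits_immediate(unsigned int transhdr_len, unsigned int nbytes)
    {
            return transhdr_len + IV + nbytes <= SGE_MAX_WR_LEN;
    }

    int main(void)
    {
            unsigned int transhdr = 160; /* hdr + key ctx + dst dsgl, assumed */

            printf("256B request immediate? %d\n", fits_immediate(transhdr, 256));
            printf("4KB request immediate?  %d\n", fits_immediate(transhdr, 4096));
            return 0;
    }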
1162 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | 1302 | static int chcr_aes_encrypt(struct ablkcipher_request *req) |
1163 | { | 1303 | { |
1164 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1304 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
1165 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
1166 | struct sk_buff *skb = NULL; | 1305 | struct sk_buff *skb = NULL; |
1167 | int err; | 1306 | int err; |
1168 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 1307 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
1169 | 1308 | ||
1170 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1309 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1171 | ctx->tx_qidx))) { | 1310 | c_ctx(tfm)->tx_qidx))) { |
1172 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 1311 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1173 | return -EBUSY; | 1312 | return -EBUSY; |
1174 | } | 1313 | } |
1175 | 1314 | ||
1176 | err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, | 1315 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1177 | CHCR_ENCRYPT_OP); | 1316 | &skb, CHCR_ENCRYPT_OP); |
1178 | if (err || !skb) | 1317 | if (err || !skb) |
1179 | return err; | 1318 | return err; |
1180 | skb->dev = u_ctx->lldi.ports[0]; | 1319 | skb->dev = u_ctx->lldi.ports[0]; |
1181 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1320 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
1182 | chcr_send_wr(skb); | 1321 | chcr_send_wr(skb); |
1183 | return -EINPROGRESS; | 1322 | return -EINPROGRESS; |
1184 | } | 1323 | } |
@@ -1186,23 +1325,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) | |||
1186 | static int chcr_aes_decrypt(struct ablkcipher_request *req) | 1325 | static int chcr_aes_decrypt(struct ablkcipher_request *req) |
1187 | { | 1326 | { |
1188 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1327 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
1189 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1328 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
1190 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
1191 | struct sk_buff *skb = NULL; | 1329 | struct sk_buff *skb = NULL; |
1192 | int err; | 1330 | int err; |
1193 | 1331 | ||
1194 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1332 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1195 | ctx->tx_qidx))) { | 1333 | c_ctx(tfm)->tx_qidx))) { |
1196 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 1334 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1197 | return -EBUSY; | 1335 | return -EBUSY; |
1198 | } | 1336 | } |
1199 | 1337 | ||
1200 | err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, | 1338 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1201 | CHCR_DECRYPT_OP); | 1339 | &skb, CHCR_DECRYPT_OP); |
1202 | if (err || !skb) | 1340 | if (err || !skb) |
1203 | return err; | 1341 | return err; |
1204 | skb->dev = u_ctx->lldi.ports[0]; | 1342 | skb->dev = u_ctx->lldi.ports[0]; |
1205 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1343 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
1206 | chcr_send_wr(skb); | 1344 | chcr_send_wr(skb); |
1207 | return -EINPROGRESS; | 1345 | return -EINPROGRESS; |
1208 | } | 1346 | } |
@@ -1350,17 +1488,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, | |||
1350 | { | 1488 | { |
1351 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 1489 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1352 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 1490 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1353 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 1491 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
1354 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | ||
1355 | struct sk_buff *skb = NULL; | 1492 | struct sk_buff *skb = NULL; |
1493 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); | ||
1356 | struct chcr_wr *chcr_req; | 1494 | struct chcr_wr *chcr_req; |
1357 | unsigned int frags = 0, transhdr_len, iopad_alignment = 0; | 1495 | struct ulptx_sgl *ulptx; |
1496 | unsigned int nents = 0, transhdr_len, iopad_alignment = 0; | ||
1358 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | 1497 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
1359 | unsigned int kctx_len = 0; | 1498 | unsigned int kctx_len = 0, temp = 0; |
1360 | u8 hash_size_in_response = 0; | 1499 | u8 hash_size_in_response = 0; |
1361 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1500 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1362 | GFP_ATOMIC; | 1501 | GFP_ATOMIC; |
1363 | struct adapter *adap = padap(ctx->dev); | 1502 | struct adapter *adap = padap(h_ctx(tfm)->dev); |
1503 | int error = 0; | ||
1364 | 1504 | ||
1365 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); | 1505 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); |
1366 | kctx_len = param->alg_prm.result_size + iopad_alignment; | 1506 | kctx_len = param->alg_prm.result_size + iopad_alignment; |
@@ -1372,15 +1512,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, | |||
1372 | else | 1512 | else |
1373 | hash_size_in_response = param->alg_prm.result_size; | 1513 | hash_size_in_response = param->alg_prm.result_size; |
1374 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); | 1514 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); |
1375 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 1515 | req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <= |
1516 | SGE_MAX_WR_LEN; | ||
1517 | nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0); | ||
1518 | nents += param->bfr_len ? 1 : 0; | ||
1519 | transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len + | ||
1520 | param->sg_len), 16) * 16) : | ||
1521 | (sgl_len(nents) * 8); | ||
1522 | transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; | ||
1523 | |||
1524 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); | ||
1376 | if (!skb) | 1525 | if (!skb) |
1377 | return skb; | 1526 | return ERR_PTR(-ENOMEM); |
1378 | |||
1379 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
1380 | chcr_req = __skb_put_zero(skb, transhdr_len); | 1527 | chcr_req = __skb_put_zero(skb, transhdr_len); |
1381 | 1528 | ||
1382 | chcr_req->sec_cpl.op_ivinsrtofst = | 1529 | chcr_req->sec_cpl.op_ivinsrtofst = |
1383 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0); | 1530 | FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0); |
1384 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); | 1531 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
1385 | 1532 | ||
1386 | chcr_req->sec_cpl.aadstart_cipherstop_hi = | 1533 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
@@ -1409,37 +1556,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, | |||
1409 | ((kctx_len + | 1556 | ((kctx_len + |
1410 | sizeof(chcr_req->key_ctx)) >> 4)); | 1557 | sizeof(chcr_req->key_ctx)) >> 4)); |
1411 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); | 1558 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); |
1412 | 1559 | ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len + | |
1413 | skb_set_transport_header(skb, transhdr_len); | 1560 | DUMMY_BYTES); |
1414 | if (param->bfr_len != 0) | 1561 | if (param->bfr_len != 0) { |
1415 | write_buffer_to_skb(skb, &frags, req_ctx->reqbfr, | 1562 | req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, |
1416 | param->bfr_len); | 1563 | req_ctx->reqbfr, param->bfr_len, |
1417 | if (param->sg_len != 0) | 1564 | DMA_TO_DEVICE); |
1418 | write_sg_to_skb(skb, &frags, req->src, param->sg_len); | 1565 | if (dma_mapping_error(&u_ctx->lldi.pdev->dev, |
1566 | req_ctx->dma_addr)) { | ||
1567 | error = -ENOMEM; | ||
1568 | goto err; | ||
1569 | } | ||
1570 | req_ctx->dma_len = param->bfr_len; | ||
1571 | } else { | ||
1572 | req_ctx->dma_addr = 0; | ||
1573 | } | ||
1574 | chcr_add_hash_src_ent(req, ulptx, param); | ||
1575 | /* Request up to max WR size */ | ||
1576 | temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len | ||
1577 | + param->bfr_len) : 0); | ||
1419 | atomic_inc(&adap->chcr_stats.digest_rqst); | 1578 | atomic_inc(&adap->chcr_stats.digest_rqst); |
1420 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, | 1579 | create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm, |
1421 | hash_size_in_response, 0, DUMMY_BYTES, 0); | 1580 | hash_size_in_response, transhdr_len, |
1581 | temp, 0); | ||
1422 | req_ctx->skb = skb; | 1582 | req_ctx->skb = skb; |
1423 | skb_get(skb); | ||
1424 | return skb; | 1583 | return skb; |
1584 | err: | ||
1585 | kfree_skb(skb); | ||
1586 | return ERR_PTR(error); | ||
1425 | } | 1587 | } |
1426 | 1588 | ||
1427 | static int chcr_ahash_update(struct ahash_request *req) | 1589 | static int chcr_ahash_update(struct ahash_request *req) |
1428 | { | 1590 | { |
1429 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 1591 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1430 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | 1592 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
1431 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | ||
1432 | struct uld_ctx *u_ctx = NULL; | 1593 | struct uld_ctx *u_ctx = NULL; |
1433 | struct sk_buff *skb; | 1594 | struct sk_buff *skb; |
1434 | u8 remainder = 0, bs; | 1595 | u8 remainder = 0, bs; |
1435 | unsigned int nbytes = req->nbytes; | 1596 | unsigned int nbytes = req->nbytes; |
1436 | struct hash_wr_param params; | 1597 | struct hash_wr_param params; |
1598 | int error; | ||
1437 | 1599 | ||
1438 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | 1600 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
1439 | 1601 | ||
1440 | u_ctx = ULD_CTX(ctx); | 1602 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
1441 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1603 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1442 | ctx->tx_qidx))) { | 1604 | h_ctx(rtfm)->tx_qidx))) { |
1443 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 1605 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1444 | return -EBUSY; | 1606 | return -EBUSY; |
1445 | } | 1607 | } |
@@ -1453,7 +1615,9 @@ static int chcr_ahash_update(struct ahash_request *req) | |||
1453 | req_ctx->reqlen += nbytes; | 1615 | req_ctx->reqlen += nbytes; |
1454 | return 0; | 1616 | return 0; |
1455 | } | 1617 | } |
1456 | 1618 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); | |
1619 | if (error) | ||
1620 | return -ENOMEM; | ||
1457 | params.opad_needed = 0; | 1621 | params.opad_needed = 0; |
1458 | params.more = 1; | 1622 | params.more = 1; |
1459 | params.last = 0; | 1623 | params.last = 0; |
@@ -1464,25 +1628,27 @@ static int chcr_ahash_update(struct ahash_request *req) | |||
1464 | req_ctx->result = 0; | 1628 | req_ctx->result = 0; |
1465 | req_ctx->data_len += params.sg_len + params.bfr_len; | 1629 | req_ctx->data_len += params.sg_len + params.bfr_len; |
1466 | skb = create_hash_wr(req, ¶ms); | 1630 | skb = create_hash_wr(req, ¶ms); |
1467 | if (!skb) | 1631 | if (IS_ERR(skb)) { |
1468 | return -ENOMEM; | 1632 | error = PTR_ERR(skb); |
1633 | goto unmap; | ||
1634 | } | ||
1469 | 1635 | ||
1470 | if (remainder) { | 1636 | if (remainder) { |
1471 | u8 *temp; | ||
1472 | /* Swap buffers */ | 1637 | /* Swap buffers */ |
1473 | temp = req_ctx->reqbfr; | 1638 | swap(req_ctx->reqbfr, req_ctx->skbfr); |
1474 | req_ctx->reqbfr = req_ctx->skbfr; | ||
1475 | req_ctx->skbfr = temp; | ||
1476 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | 1639 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
1477 | req_ctx->reqbfr, remainder, req->nbytes - | 1640 | req_ctx->reqbfr, remainder, req->nbytes - |
1478 | remainder); | 1641 | remainder); |
1479 | } | 1642 | } |
1480 | req_ctx->reqlen = remainder; | 1643 | req_ctx->reqlen = remainder; |
1481 | skb->dev = u_ctx->lldi.ports[0]; | 1644 | skb->dev = u_ctx->lldi.ports[0]; |
1482 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1645 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
1483 | chcr_send_wr(skb); | 1646 | chcr_send_wr(skb); |
1484 | 1647 | ||
1485 | return -EINPROGRESS; | 1648 | return -EINPROGRESS; |
1649 | unmap: | ||
1650 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | ||
1651 | return error; | ||
1486 | } | 1652 | } |
1487 | 1653 | ||
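The swap()/sg_pcopy_to_buffer() dance in chcr_ahash_update() exists because the hardware only digests whole blocks mid-stream: block-aligned bytes are sent now, the tail is parked in a bounce buffer, and reqbfr/skbfr trade places so the in-flight buffer is not overwritten. A sketch of the byte accounting, assuming the usual remainder = (reqlen + nbytes) % bs split:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bs = 64;      /* SHA block size */
            unsigned int reqlen = 20;  /* bytes already buffered */
            unsigned int nbytes = 150; /* new bytes in this update */

            unsigned int total = reqlen + nbytes;
            unsigned int remainder = total % bs;   /* stays buffered */

            printf("DMA %u bytes now, hold %u back\n",
                   total - remainder, remainder);
            return 0;
    }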
1488 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) | 1654 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) |
@@ -1499,13 +1665,12 @@ static int chcr_ahash_final(struct ahash_request *req) | |||
1499 | { | 1665 | { |
1500 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 1666 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1501 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | 1667 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
1502 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | ||
1503 | struct hash_wr_param params; | 1668 | struct hash_wr_param params; |
1504 | struct sk_buff *skb; | 1669 | struct sk_buff *skb; |
1505 | struct uld_ctx *u_ctx = NULL; | 1670 | struct uld_ctx *u_ctx = NULL; |
1506 | u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | 1671 | u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
1507 | 1672 | ||
1508 | u_ctx = ULD_CTX(ctx); | 1673 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
1509 | if (is_hmac(crypto_ahash_tfm(rtfm))) | 1674 | if (is_hmac(crypto_ahash_tfm(rtfm))) |
1510 | params.opad_needed = 1; | 1675 | params.opad_needed = 1; |
1511 | else | 1676 | else |
@@ -1528,11 +1693,11 @@ static int chcr_ahash_final(struct ahash_request *req) | |||
1528 | params.more = 0; | 1693 | params.more = 0; |
1529 | } | 1694 | } |
1530 | skb = create_hash_wr(req, ¶ms); | 1695 | skb = create_hash_wr(req, ¶ms); |
1531 | if (!skb) | 1696 | if (IS_ERR(skb)) |
1532 | return -ENOMEM; | 1697 | return PTR_ERR(skb); |
1533 | 1698 | ||
1534 | skb->dev = u_ctx->lldi.ports[0]; | 1699 | skb->dev = u_ctx->lldi.ports[0]; |
1535 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1700 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
1536 | chcr_send_wr(skb); | 1701 | chcr_send_wr(skb); |
1537 | return -EINPROGRESS; | 1702 | return -EINPROGRESS; |
1538 | } | 1703 | } |
@@ -1541,17 +1706,17 @@ static int chcr_ahash_finup(struct ahash_request *req) | |||
1541 | { | 1706 | { |
1542 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 1707 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1543 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | 1708 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
1544 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | ||
1545 | struct uld_ctx *u_ctx = NULL; | 1709 | struct uld_ctx *u_ctx = NULL; |
1546 | struct sk_buff *skb; | 1710 | struct sk_buff *skb; |
1547 | struct hash_wr_param params; | 1711 | struct hash_wr_param params; |
1548 | u8 bs; | 1712 | u8 bs; |
1713 | int error; | ||
1549 | 1714 | ||
1550 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | 1715 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
1551 | u_ctx = ULD_CTX(ctx); | 1716 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
1552 | 1717 | ||
1553 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1718 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1554 | ctx->tx_qidx))) { | 1719 | h_ctx(rtfm)->tx_qidx))) { |
1555 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 1720 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1556 | return -EBUSY; | 1721 | return -EBUSY; |
1557 | } | 1722 | } |
@@ -1577,34 +1742,41 @@ static int chcr_ahash_finup(struct ahash_request *req) | |||
1577 | params.last = 1; | 1742 | params.last = 1; |
1578 | params.more = 0; | 1743 | params.more = 0; |
1579 | } | 1744 | } |
1580 | 1745 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); | |
1581 | skb = create_hash_wr(req, ¶ms); | 1746 | if (error) |
1582 | if (!skb) | ||
1583 | return -ENOMEM; | 1747 | return -ENOMEM; |
1584 | 1748 | ||
1749 | skb = create_hash_wr(req, ¶ms); | ||
1750 | if (IS_ERR(skb)) { | ||
1751 | error = PTR_ERR(skb); | ||
1752 | goto unmap; | ||
1753 | } | ||
1585 | skb->dev = u_ctx->lldi.ports[0]; | 1754 | skb->dev = u_ctx->lldi.ports[0]; |
1586 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1755 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
1587 | chcr_send_wr(skb); | 1756 | chcr_send_wr(skb); |
1588 | 1757 | ||
1589 | return -EINPROGRESS; | 1758 | return -EINPROGRESS; |
1759 | unmap: | ||
1760 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | ||
1761 | return error; | ||
1590 | } | 1762 | } |
1591 | 1763 | ||
1592 | static int chcr_ahash_digest(struct ahash_request *req) | 1764 | static int chcr_ahash_digest(struct ahash_request *req) |
1593 | { | 1765 | { |
1594 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 1766 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1595 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | 1767 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
1596 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | ||
1597 | struct uld_ctx *u_ctx = NULL; | 1768 | struct uld_ctx *u_ctx = NULL; |
1598 | struct sk_buff *skb; | 1769 | struct sk_buff *skb; |
1599 | struct hash_wr_param params; | 1770 | struct hash_wr_param params; |
1600 | u8 bs; | 1771 | u8 bs; |
1772 | int error; | ||
1601 | 1773 | ||
1602 | rtfm->init(req); | 1774 | rtfm->init(req); |
1603 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | 1775 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
1604 | 1776 | ||
1605 | u_ctx = ULD_CTX(ctx); | 1777 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
1606 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1778 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1607 | ctx->tx_qidx))) { | 1779 | h_ctx(rtfm)->tx_qidx))) { |
1608 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 1780 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
1609 | return -EBUSY; | 1781 | return -EBUSY; |
1610 | } | 1782 | } |
@@ -1613,6 +1785,9 @@ static int chcr_ahash_digest(struct ahash_request *req) | |||
1613 | params.opad_needed = 1; | 1785 | params.opad_needed = 1; |
1614 | else | 1786 | else |
1615 | params.opad_needed = 0; | 1787 | params.opad_needed = 0; |
1788 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); | ||
1789 | if (error) | ||
1790 | return -ENOMEM; | ||
1616 | 1791 | ||
1617 | params.last = 0; | 1792 | params.last = 0; |
1618 | params.more = 0; | 1793 | params.more = 0; |
@@ -1630,13 +1805,17 @@ static int chcr_ahash_digest(struct ahash_request *req) | |||
1630 | } | 1805 | } |
1631 | 1806 | ||
1632 | skb = create_hash_wr(req, ¶ms); | 1807 | skb = create_hash_wr(req, ¶ms); |
1633 | if (!skb) | 1808 | if (IS_ERR(skb)) { |
1634 | return -ENOMEM; | 1809 | error = PTR_ERR(skb); |
1635 | 1810 | goto unmap; | |
1811 | } | ||
1636 | skb->dev = u_ctx->lldi.ports[0]; | 1812 | skb->dev = u_ctx->lldi.ports[0]; |
1637 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1813 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
1638 | chcr_send_wr(skb); | 1814 | chcr_send_wr(skb); |
1639 | return -EINPROGRESS; | 1815 | return -EINPROGRESS; |
1816 | unmap: | ||
1817 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | ||
1818 | return error; | ||
1640 | } | 1819 | } |
1641 | 1820 | ||
1642 | static int chcr_ahash_export(struct ahash_request *areq, void *out) | 1821 | static int chcr_ahash_export(struct ahash_request *areq, void *out) |
@@ -1646,6 +1825,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out) | |||
1646 | 1825 | ||
1647 | state->reqlen = req_ctx->reqlen; | 1826 | state->reqlen = req_ctx->reqlen; |
1648 | state->data_len = req_ctx->data_len; | 1827 | state->data_len = req_ctx->data_len; |
1828 | state->is_sg_map = 0; | ||
1829 | state->result = 0; | ||
1649 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); | 1830 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
1650 | memcpy(state->partial_hash, req_ctx->partial_hash, | 1831 | memcpy(state->partial_hash, req_ctx->partial_hash, |
1651 | CHCR_HASH_MAX_DIGEST_SIZE); | 1832 | CHCR_HASH_MAX_DIGEST_SIZE); |
@@ -1661,6 +1842,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |||
1661 | req_ctx->data_len = state->data_len; | 1842 | req_ctx->data_len = state->data_len; |
1662 | req_ctx->reqbfr = req_ctx->bfr1; | 1843 | req_ctx->reqbfr = req_ctx->bfr1; |
1663 | req_ctx->skbfr = req_ctx->bfr2; | 1844 | req_ctx->skbfr = req_ctx->bfr2; |
1845 | req_ctx->is_sg_map = 0; | ||
1846 | req_ctx->result = 0; | ||
1664 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); | 1847 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); |
1665 | memcpy(req_ctx->partial_hash, state->partial_hash, | 1848 | memcpy(req_ctx->partial_hash, state->partial_hash, |
1666 | CHCR_HASH_MAX_DIGEST_SIZE); | 1849 | CHCR_HASH_MAX_DIGEST_SIZE); |
@@ -1670,8 +1853,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |||
1670 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | 1853 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
1671 | unsigned int keylen) | 1854 | unsigned int keylen) |
1672 | { | 1855 | { |
1673 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 1856 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
1674 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | ||
1675 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | 1857 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
1676 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | 1858 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); |
1677 | unsigned int i, err = 0, updated_digestsize; | 1859 | unsigned int i, err = 0, updated_digestsize; |
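Review note: throughout these hunks the two-step "fetch the chcr_context, then the sub-context" dance collapses into small accessors: h_ctx() for hashes, plus a_ctx() and c_ctx() used in the AEAD and cipher hunks below. Their definitions live earlier in this patch; a plausible shape, shown as a sketch of the pattern rather than the verbatim helpers (they rely only on ctx lookups already used in this file)::

    /* Sketch only: the real helpers are defined elsewhere in chcr_algo.c. */
    static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
    {
        return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
    }

    static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
    {
        return crypto_aead_ctx(tfm);
    }

    static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
    {
        return crypto_ablkcipher_ctx(tfm);
    }

With these, sub-context macros compose directly, e.g. HMAC_CTX(h_ctx(rtfm)) or AEAD_CTX(a_ctx(tfm)), which is exactly the rewrite applied line by line in this file.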
@@ -1724,8 +1906,7 @@ out: | |||
1724 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 1906 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
1725 | unsigned int key_len) | 1907 | unsigned int key_len) |
1726 | { | 1908 | { |
1727 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | 1909 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
1728 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1729 | unsigned short context_size = 0; | 1910 | unsigned short context_size = 0; |
1730 | int err; | 1911 | int err; |
1731 | 1912 | ||
@@ -1764,6 +1945,7 @@ static int chcr_sha_init(struct ahash_request *areq) | |||
1764 | req_ctx->skbfr = req_ctx->bfr2; | 1945 | req_ctx->skbfr = req_ctx->bfr2; |
1765 | req_ctx->skb = NULL; | 1946 | req_ctx->skb = NULL; |
1766 | req_ctx->result = 0; | 1947 | req_ctx->result = 0; |
1948 | req_ctx->is_sg_map = 0; | ||
1767 | copy_hash_init_values(req_ctx->partial_hash, digestsize); | 1949 | copy_hash_init_values(req_ctx->partial_hash, digestsize); |
1768 | return 0; | 1950 | return 0; |
1769 | } | 1951 | } |
@@ -1779,8 +1961,7 @@ static int chcr_hmac_init(struct ahash_request *areq) | |||
1779 | { | 1961 | { |
1780 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1962 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1781 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); | 1963 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); |
1782 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); | 1964 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); |
1783 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | ||
1784 | unsigned int digestsize = crypto_ahash_digestsize(rtfm); | 1965 | unsigned int digestsize = crypto_ahash_digestsize(rtfm); |
1785 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | 1966 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
1786 | 1967 | ||
@@ -1826,86 +2007,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) | |||
1826 | } | 2007 | } |
1827 | } | 2008 | } |
1828 | 2009 | ||
1829 | static int is_newsg(struct scatterlist *sgl, unsigned int *newents) | 2010 | static int chcr_aead_common_init(struct aead_request *req, |
1830 | { | 2011 | unsigned short op_type) |
1831 | int nents = 0; | ||
1832 | int ret = 0; | ||
1833 | |||
1834 | while (sgl) { | ||
1835 | if (sgl->length > CHCR_SG_SIZE) | ||
1836 | ret = 1; | ||
1837 | nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE); | ||
1838 | sgl = sg_next(sgl); | ||
1839 | } | ||
1840 | *newents = nents; | ||
1841 | return ret; | ||
1842 | } | ||
1843 | |||
1844 | static inline void free_new_sg(struct scatterlist *sgl) | ||
1845 | { | 2012 | { |
1846 | kfree(sgl); | 2013 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1847 | } | 2014 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
1848 | 2015 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
1849 | static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, | 2016 | int error = -EINVAL; |
1850 | unsigned int nents) | 2017 | unsigned int dst_size; |
1851 | { | 2018 | unsigned int authsize = crypto_aead_authsize(tfm); |
1852 | struct scatterlist *newsg, *sg; | ||
1853 | int i, len, processed = 0; | ||
1854 | struct page *spage; | ||
1855 | int offset; | ||
1856 | 2019 | ||
1857 | newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL); | 2020 | dst_size = req->assoclen + req->cryptlen + (op_type ? |
1858 | if (!newsg) | 2021 | -authsize : authsize); |
1859 | return ERR_PTR(-ENOMEM); | 2022 | /* validate key size */ |
1860 | sg = newsg; | 2023 | if (aeadctx->enckey_len == 0) |
1861 | sg_init_table(sg, nents); | 2024 | goto err; |
1862 | offset = sgl->offset; | 2025 | if (op_type && req->cryptlen < authsize) |
1863 | spage = sg_page(sgl); | 2026 | goto err; |
1864 | for (i = 0; i < nents; i++) { | 2027 | error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
1865 | len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE); | 2028 | op_type); |
1866 | sg_set_page(sg, spage, len, offset); | 2029 | if (error) { |
1867 | processed += len; | 2030 | error = -ENOMEM; |
1868 | offset += len; | 2031 | goto err; |
1869 | if (offset >= PAGE_SIZE) { | ||
1870 | offset = offset % PAGE_SIZE; | ||
1871 | spage++; | ||
1872 | } | ||
1873 | if (processed == sgl->length) { | ||
1874 | processed = 0; | ||
1875 | sgl = sg_next(sgl); | ||
1876 | if (!sgl) | ||
1877 | break; | ||
1878 | spage = sg_page(sgl); | ||
1879 | offset = sgl->offset; | ||
1880 | } | ||
1881 | sg = sg_next(sg); | ||
1882 | } | 2032 | } |
1883 | return newsg; | 2033 | reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, |
2034 | CHCR_SRC_SG_SIZE, 0); | ||
2035 | reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, | ||
2036 | CHCR_SRC_SG_SIZE, req->assoclen); | ||
2037 | return 0; | ||
2038 | err: | ||
2039 | return error; | ||
1884 | } | 2040 | } |
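Review note: chcr_aead_common_init() replaces the old copy-and-reallocate path (is_newsg/alloc_new_sg) with sg_nents_xlen(), which by its use here counts how many hardware SG entries a byte range needs when no entry may exceed a maximum chunk (CHCR_SRC_SG_SIZE), starting at a byte offset into the list. A standalone model of that counting over plain length arrays instead of struct scatterlist; the chunk size and lengths below are illustrative::

    #include <stdio.h>

    /* Entries needed to cover `len` bytes starting at byte `skip`,
     * splitting any segment larger than `maxsize`. */
    static int nents_xlen(const unsigned int *seg, int nseg,
                          unsigned int len, unsigned int maxsize,
                          unsigned int skip)
    {
        int i, n = 0;

        for (i = 0; i < nseg && len; i++) {
            unsigned int avail = seg[i];

            if (skip >= avail) {        /* still inside the skipped prefix */
                skip -= avail;
                continue;
            }
            avail -= skip;
            skip = 0;
            if (avail > len)
                avail = len;
            n += (avail + maxsize - 1) / maxsize;   /* DIV_ROUND_UP */
            len -= avail;
        }
        return n;
    }

    int main(void)
    {
        unsigned int sg[] = { 4096, 16384, 512 };

        /* AAD entries: first 64 bytes; payload entries: next 16000 bytes */
        printf("aad=%d src=%d\n",
               nents_xlen(sg, 3, 64, 2048, 0),       /* 1 */
               nents_xlen(sg, 3, 16000, 2048, 64));  /* 8 */
        return 0;
    }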
1885 | 2041 | ||
1886 | static int chcr_copy_assoc(struct aead_request *req, | 2042 | static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, |
1887 | struct chcr_aead_ctx *ctx) | ||
1888 | { | ||
1889 | SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); | ||
1890 | |||
1891 | skcipher_request_set_tfm(skreq, ctx->null); | ||
1892 | skcipher_request_set_callback(skreq, aead_request_flags(req), | ||
1893 | NULL, NULL); | ||
1894 | skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, | ||
1895 | NULL); | ||
1896 | |||
1897 | return crypto_skcipher_encrypt(skreq); | ||
1898 | } | ||
1899 | static int chcr_aead_need_fallback(struct aead_request *req, int src_nent, | ||
1900 | int aadmax, int wrlen, | 2043 | int aadmax, int wrlen, |
1901 | unsigned short op_type) | 2044 | unsigned short op_type) |
1902 | { | 2045 | { |
1903 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | 2046 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); |
1904 | 2047 | ||
1905 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || | 2048 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || |
2049 | dst_nents > MAX_DSGL_ENT || | ||
1906 | (req->assoclen > aadmax) || | 2050 | (req->assoclen > aadmax) || |
1907 | (src_nent > MAX_SKB_FRAGS) || | 2051 | (wrlen > SGE_MAX_WR_LEN)) |
1908 | (wrlen > MAX_WR_SIZE)) | ||
1909 | return 1; | 2052 | return 1; |
1910 | return 0; | 2053 | return 0; |
1911 | } | 2054 | } |
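Review note: the fallback test now keys on the destination DSGL entry count and the final work-request length instead of source fragment counts: a degenerate payload, a DSGL overflow (dst_nents > MAX_DSGL_ENT), more AAD than the engine accepts, or a WR larger than SGE_MAX_WR_LEN all bounce the request to the software cipher. A compact standalone restatement; the two limit values are placeholders, not the hardware's::

    #include <stdio.h>

    #define MAX_DSGL_ENT   256   /* placeholder values for illustration */
    #define SGE_MAX_WR_LEN 512

    static int need_fallback(unsigned int cryptlen, unsigned int tag_adjust,
                             unsigned int dst_nents, unsigned int assoclen,
                             unsigned int aadmax, unsigned int wrlen)
    {
        return cryptlen == tag_adjust ||    /* nothing left to cipher */
               dst_nents > MAX_DSGL_ENT ||  /* destination DSGL would overflow */
               assoclen > aadmax ||         /* too much AAD for the engine */
               wrlen > SGE_MAX_WR_LEN;      /* work request itself too large */
    }

    int main(void)
    {
        /* decrypt of nothing but the tag (cryptlen == authsize) falls back */
        printf("%d\n", need_fallback(16, 16, 4, 32, 511, 320));    /* 1 */
        printf("%d\n", need_fallback(4096, 16, 4, 32, 511, 320));  /* 0 */
        return 0;
    }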
@@ -1913,8 +2056,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent, | |||
1913 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) | 2056 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) |
1914 | { | 2057 | { |
1915 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2058 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1916 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2059 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
1917 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
1918 | struct aead_request *subreq = aead_request_ctx(req); | 2060 | struct aead_request *subreq = aead_request_ctx(req); |
1919 | 2061 | ||
1920 | aead_request_set_tfm(subreq, aeadctx->sw_cipher); | 2062 | aead_request_set_tfm(subreq, aeadctx->sw_cipher); |
@@ -1933,96 +2075,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1933 | unsigned short op_type) | 2075 | unsigned short op_type) |
1934 | { | 2076 | { |
1935 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2077 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1936 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2078 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
1937 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
1938 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
1939 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | 2079 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
1940 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2080 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
1941 | struct sk_buff *skb = NULL; | 2081 | struct sk_buff *skb = NULL; |
1942 | struct chcr_wr *chcr_req; | 2082 | struct chcr_wr *chcr_req; |
1943 | struct cpl_rx_phys_dsgl *phys_cpl; | 2083 | struct cpl_rx_phys_dsgl *phys_cpl; |
1944 | struct phys_sge_parm sg_param; | 2084 | struct ulptx_sgl *ulptx; |
1945 | struct scatterlist *src; | 2085 | unsigned int transhdr_len; |
1946 | unsigned int frags = 0, transhdr_len; | 2086 | unsigned int dst_size = 0, temp; |
1947 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; | 2087 | unsigned int kctx_len = 0, dnents; |
1948 | unsigned int kctx_len = 0, nents; | ||
1949 | unsigned short stop_offset = 0; | ||
1950 | unsigned int assoclen = req->assoclen; | 2088 | unsigned int assoclen = req->assoclen; |
1951 | unsigned int authsize = crypto_aead_authsize(tfm); | 2089 | unsigned int authsize = crypto_aead_authsize(tfm); |
1952 | int error = -EINVAL, src_nent; | 2090 | int error = -EINVAL; |
1953 | int null = 0; | 2091 | int null = 0; |
1954 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 2092 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1955 | GFP_ATOMIC; | 2093 | GFP_ATOMIC; |
1956 | struct adapter *adap = padap(ctx->dev); | 2094 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
1957 | 2095 | ||
1958 | reqctx->newdstsg = NULL; | 2096 | if (req->cryptlen == 0) |
1959 | dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : | 2097 | return NULL; |
1960 | authsize); | ||
1961 | if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0)) | ||
1962 | goto err; | ||
1963 | |||
1964 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | ||
1965 | goto err; | ||
1966 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); | ||
1967 | if (src_nent < 0) | ||
1968 | goto err; | ||
1969 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); | ||
1970 | 2098 | ||
1971 | if (req->src != req->dst) { | 2099 | reqctx->b0_dma = 0; |
1972 | error = chcr_copy_assoc(req, aeadctx); | ||
1973 | if (error) | ||
1974 | return ERR_PTR(error); | ||
1975 | } | ||
1976 | if (dst_size && is_newsg(req->dst, &nents)) { | ||
1977 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
1978 | if (IS_ERR(reqctx->newdstsg)) | ||
1979 | return ERR_CAST(reqctx->newdstsg); | ||
1980 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
1981 | reqctx->newdstsg, req->assoclen); | ||
1982 | } else { | ||
1983 | if (req->src == req->dst) | ||
1984 | reqctx->dst = src; | ||
1985 | else | ||
1986 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
1987 | req->dst, req->assoclen); | ||
1988 | } | ||
1989 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | 2100 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { |
1990 | null = 1; | 2101 | null = 1; |
1991 | assoclen = 0; | 2102 | assoclen = 0; |
1992 | } | 2103 | } |
1993 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + | 2104 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : |
1994 | (op_type ? -authsize : authsize)); | 2105 | authsize); |
1995 | if (reqctx->dst_nents < 0) { | 2106 | error = chcr_aead_common_init(req, op_type); |
1996 | pr_err("AUTHENC:Invalid Destination sg entries\n"); | 2107 | if (error) |
1997 | error = -EINVAL; | 2108 | return ERR_PTR(error); |
1998 | goto err; | 2109 | if (dst_size) { |
2110 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | ||
2111 | dnents += sg_nents_xlen(req->dst, req->cryptlen + | ||
2112 | (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, | ||
2113 | req->assoclen); | ||
2114 | dnents += MIN_AUTH_SG; // For IV | ||
2115 | } else { | ||
2116 | dnents = 0; | ||
1999 | } | 2117 | } |
2000 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | 2118 | |
2119 | dst_size = get_space_for_phys_dsgl(dnents); | ||
2001 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) | 2120 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) |
2002 | - sizeof(chcr_req->key_ctx); | 2121 | - sizeof(chcr_req->key_ctx); |
2003 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | 2122 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2004 | if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG, | 2123 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < |
2005 | T6_MAX_AAD_SIZE, | 2124 | SGE_MAX_WR_LEN; |
2006 | transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), | 2125 | temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16) |
2007 | op_type)) { | 2126 | * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents |
2127 | + MIN_GCM_SG) * 8); | ||
2128 | transhdr_len += temp; | ||
2129 | transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; | ||
2130 | |||
2131 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, | ||
2132 | transhdr_len, op_type)) { | ||
2008 | atomic_inc(&adap->chcr_stats.fallback); | 2133 | atomic_inc(&adap->chcr_stats.fallback); |
2009 | free_new_sg(reqctx->newdstsg); | 2134 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2010 | reqctx->newdstsg = NULL; | 2135 | op_type); |
2011 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2136 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2012 | } | 2137 | } |
2013 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 2138 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
2014 | if (!skb) { | 2139 | if (!skb) { |
2015 | error = -ENOMEM; | 2140 | error = -ENOMEM; |
2016 | goto err; | 2141 | goto err; |
2017 | } | 2142 | } |
2018 | 2143 | ||
2019 | /* LLD is going to write the sge hdr. */ | ||
2020 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
2021 | |||
2022 | /* Write WR */ | ||
2023 | chcr_req = __skb_put_zero(skb, transhdr_len); | 2144 | chcr_req = __skb_put_zero(skb, transhdr_len); |
2024 | 2145 | ||
2025 | stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | 2146 | temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; |
2026 | 2147 | ||
2027 | /* | 2148 | /* |
2028 | * Input order is AAD, IV and Payload, where IV should be included as | 2149 | * Input order is AAD, IV and Payload, where IV should be included as |
@@ -2030,24 +2151,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2030 | * to the hardware spec | 2151 | * to the hardware spec |
2031 | */ | 2152 | */ |
2032 | chcr_req->sec_cpl.op_ivinsrtofst = | 2153 | chcr_req->sec_cpl.op_ivinsrtofst = |
2033 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, | 2154 | FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, |
2034 | (ivsize ? (assoclen + 1) : 0)); | 2155 | assoclen + 1); |
2035 | chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen); | 2156 | chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); |
2036 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | 2157 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2037 | assoclen ? 1 : 0, assoclen, | 2158 | assoclen ? 1 : 0, assoclen, |
2038 | assoclen + ivsize + 1, | 2159 | assoclen + IV + 1, |
2039 | (stop_offset & 0x1F0) >> 4); | 2160 | (temp & 0x1F0) >> 4); |
2040 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( | 2161 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( |
2041 | stop_offset & 0xF, | 2162 | temp & 0xF, |
2042 | null ? 0 : assoclen + ivsize + 1, | 2163 | null ? 0 : assoclen + IV + 1, |
2043 | stop_offset, stop_offset); | 2164 | temp, temp); |
2044 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | 2165 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
2045 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | 2166 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, |
2046 | CHCR_SCMD_CIPHER_MODE_AES_CBC, | 2167 | CHCR_SCMD_CIPHER_MODE_AES_CBC, |
2047 | actx->auth_mode, aeadctx->hmac_ctrl, | 2168 | actx->auth_mode, aeadctx->hmac_ctrl, |
2048 | ivsize >> 1); | 2169 | IV >> 1); |
2049 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | 2170 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2050 | 0, 1, dst_size); | 2171 | 0, 0, dst_size); |
2051 | 2172 | ||
2052 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | 2173 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2053 | if (op_type == CHCR_ENCRYPT_OP) | 2174 | if (op_type == CHCR_ENCRYPT_OP) |
@@ -2060,41 +2181,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2060 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << | 2181 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << |
2061 | 4), actx->h_iopad, kctx_len - | 2182 | 4), actx->h_iopad, kctx_len - |
2062 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); | 2183 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); |
2063 | 2184 | memcpy(reqctx->iv, req->iv, IV); | |
2064 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 2185 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2065 | sg_param.nents = reqctx->dst_nents; | 2186 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2066 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 2187 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); |
2067 | sg_param.qid = qid; | 2188 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); |
2068 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, | ||
2069 | reqctx->dst, &sg_param); | ||
2070 | if (error) | ||
2071 | goto dstmap_fail; | ||
2072 | |||
2073 | skb_set_transport_header(skb, transhdr_len); | ||
2074 | |||
2075 | if (assoclen) { | ||
2076 | /* AAD buffer in */ | ||
2077 | write_sg_to_skb(skb, &frags, req->src, assoclen); | ||
2078 | |||
2079 | } | ||
2080 | write_buffer_to_skb(skb, &frags, req->iv, ivsize); | ||
2081 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
2082 | atomic_inc(&adap->chcr_stats.cipher_rqst); | 2189 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
2083 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, | 2190 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2084 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); | 2191 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); |
2192 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, | ||
2193 | transhdr_len, temp, 0); | ||
2085 | reqctx->skb = skb; | 2194 | reqctx->skb = skb; |
2086 | skb_get(skb); | 2195 | reqctx->op = op_type; |
2087 | 2196 | ||
2088 | return skb; | 2197 | return skb; |
2089 | dstmap_fail: | ||
2090 | /* ivmap_fail: */ | ||
2091 | kfree_skb(skb); | ||
2092 | err: | 2198 | err: |
2093 | free_new_sg(reqctx->newdstsg); | 2199 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2094 | reqctx->newdstsg = NULL; | 2200 | op_type); |
2201 | |||
2095 | return ERR_PTR(error); | 2202 | return ERR_PTR(error); |
2096 | } | 2203 | } |
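Review note: the new reqctx->imm flag decides whether AAD, IV and payload ride inline in the work request (immediate data) or are referenced through a ULPTX SGL: inline only when the whole WR fits within SGE_MAX_WR_LEN, and either way the WR length is padded to a 16-byte multiple. A standalone reconstruction of that arithmetic; the limit is a placeholder and the SGL byte cost only approximates the driver's sgl_len()*8 term::

    #include <stdio.h>

    #define IV             16    /* the driver's fixed on-wire IV slot */
    #define SGE_MAX_WR_LEN 512   /* placeholder for illustration */

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int wr_len(unsigned int transhdr_len, unsigned int assoclen,
                               unsigned int cryptlen, unsigned int src_nents,
                               unsigned int aad_nents, unsigned int min_sg,
                               int *imm)
    {
        unsigned int payload = assoclen + IV + cryptlen;

        *imm = (transhdr_len + payload) < SGE_MAX_WR_LEN;
        if (*imm)   /* immediate: payload copied inline, 16-byte aligned */
            transhdr_len += DIV_ROUND_UP(payload, 16) * 16;
        else        /* SGL: pay only the per-entry descriptor cost */
            transhdr_len += 8 * (src_nents + aad_nents + min_sg);
        return DIV_ROUND_UP(transhdr_len, 16) * 16;  /* WR is 16-byte aligned */
    }

    int main(void)
    {
        int imm;
        unsigned int len;

        len = wr_len(160, 16, 64, 1, 1, 2, &imm);
        printf("imm=%d wr=%u\n", imm, len);   /* small request goes immediate */
        len = wr_len(160, 16, 4096, 3, 1, 2, &imm);
        printf("imm=%d wr=%u\n", imm, len);   /* large payload uses the SGL */
        return 0;
    }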
2097 | 2204 | ||
2205 | static int chcr_aead_dma_map(struct device *dev, | ||
2206 | struct aead_request *req, | ||
2207 | unsigned short op_type) | ||
2208 | { | ||
2209 | int error; | ||
2210 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
2211 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
2212 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
2213 | int dst_size; | ||
2214 | |||
2215 | dst_size = req->assoclen + req->cryptlen + (op_type ? | ||
2216 | -authsize : authsize); | ||
2217 | if (!req->cryptlen || !dst_size) | ||
2218 | return 0; | ||
2219 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, | ||
2220 | DMA_BIDIRECTIONAL); | ||
2221 | if (dma_mapping_error(dev, reqctx->iv_dma)) | ||
2222 | return -ENOMEM; | ||
2223 | |||
2224 | if (req->src == req->dst) { | ||
2225 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | ||
2226 | DMA_BIDIRECTIONAL); | ||
2227 | if (!error) | ||
2228 | goto err; | ||
2229 | } else { | ||
2230 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | ||
2231 | DMA_TO_DEVICE); | ||
2232 | if (!error) | ||
2233 | goto err; | ||
2234 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | ||
2235 | DMA_FROM_DEVICE); | ||
2236 | if (!error) { | ||
2237 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2238 | DMA_TO_DEVICE); | ||
2239 | goto err; | ||
2240 | } | ||
2241 | } | ||
2242 | |||
2243 | return 0; | ||
2244 | err: | ||
2245 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | ||
2246 | return -ENOMEM; | ||
2247 | } | ||
2248 | |||
2249 | static void chcr_aead_dma_unmap(struct device *dev, | ||
2250 | struct aead_request *req, | ||
2251 | unsigned short op_type) | ||
2252 | { | ||
2253 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
2254 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
2255 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
2256 | int dst_size; | ||
2257 | |||
2258 | dst_size = req->assoclen + req->cryptlen + (op_type ? | ||
2259 | -authsize : authsize); | ||
2260 | if (!req->cryptlen || !dst_size) | ||
2261 | return; | ||
2262 | |||
2263 | dma_unmap_single(dev, reqctx->iv_dma, IV, | ||
2264 | DMA_BIDIRECTIONAL); | ||
2265 | if (req->src == req->dst) { | ||
2266 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2267 | DMA_BIDIRECTIONAL); | ||
2268 | } else { | ||
2269 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2270 | DMA_TO_DEVICE); | ||
2271 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | ||
2272 | DMA_FROM_DEVICE); | ||
2273 | } | ||
2274 | } | ||
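Review note: chcr_aead_dma_map()/chcr_aead_dma_unmap() form a strict pair: the IV goes through dma_map_single(), and the scatterlists are mapped DMA_BIDIRECTIONAL for in-place requests (src == dst) but as a TO_DEVICE/FROM_DEVICE pair otherwise, with the unmap side replaying exactly the same choices. A condensed kernel-style sketch of the pattern (hypothetical struct and helpers, not the driver functions), assuming the usual <linux/dma-mapping.h> API::

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    struct xfer {           /* hypothetical request for the sketch */
        u8 *iv;
        dma_addr_t iv_dma;
        struct scatterlist *src, *dst;
    };

    static int xfer_map(struct device *dev, struct xfer *x, unsigned int ivlen)
    {
        x->iv_dma = dma_map_single(dev, x->iv, ivlen, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, x->iv_dma))
            return -ENOMEM;

        if (x->src == x->dst) {   /* in-place: one bidirectional mapping */
            if (!dma_map_sg(dev, x->src, sg_nents(x->src),
                            DMA_BIDIRECTIONAL))
                goto err;
        } else {                  /* out-of-place: directional pair */
            if (!dma_map_sg(dev, x->src, sg_nents(x->src), DMA_TO_DEVICE))
                goto err;
            if (!dma_map_sg(dev, x->dst, sg_nents(x->dst),
                            DMA_FROM_DEVICE)) {
                dma_unmap_sg(dev, x->src, sg_nents(x->src), DMA_TO_DEVICE);
                goto err;
            }
        }
        return 0;
    err:
        dma_unmap_single(dev, x->iv_dma, ivlen, DMA_BIDIRECTIONAL);
        return -ENOMEM;
    }

    static void xfer_unmap(struct device *dev, struct xfer *x, unsigned int ivlen)
    {
        dma_unmap_single(dev, x->iv_dma, ivlen, DMA_BIDIRECTIONAL);
        if (x->src == x->dst) {
            dma_unmap_sg(dev, x->src, sg_nents(x->src), DMA_BIDIRECTIONAL);
        } else {
            dma_unmap_sg(dev, x->src, sg_nents(x->src), DMA_TO_DEVICE);
            dma_unmap_sg(dev, x->dst, sg_nents(x->dst), DMA_FROM_DEVICE);
        }
    }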
2275 | |||
2276 | static inline void chcr_add_aead_src_ent(struct aead_request *req, | ||
2277 | struct ulptx_sgl *ulptx, | ||
2278 | unsigned int assoclen, | ||
2279 | unsigned short op_type) | ||
2280 | { | ||
2281 | struct ulptx_walk ulp_walk; | ||
2282 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
2283 | |||
2284 | if (reqctx->imm) { | ||
2285 | u8 *buf = (u8 *)ulptx; | ||
2286 | |||
2287 | if (reqctx->b0_dma) { | ||
2288 | memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); | ||
2289 | buf += reqctx->b0_len; | ||
2290 | } | ||
2291 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | ||
2292 | buf, assoclen, 0); | ||
2293 | buf += assoclen; | ||
2294 | memcpy(buf, reqctx->iv, IV); | ||
2295 | buf += IV; | ||
2296 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | ||
2297 | buf, req->cryptlen, req->assoclen); | ||
2298 | } else { | ||
2299 | ulptx_walk_init(&ulp_walk, ulptx); | ||
2300 | if (reqctx->b0_dma) | ||
2301 | ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, | ||
2302 | &reqctx->b0_dma); | ||
2303 | ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); | ||
2304 | ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); | ||
2305 | ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, | ||
2306 | req->assoclen); | ||
2307 | ulptx_walk_end(&ulp_walk); | ||
2308 | } | ||
2309 | } | ||
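Review note: chcr_add_aead_src_ent() preserves one invariant in both modes: the device sees B0 (CCM only), then AAD, then IV, then payload, whether the bytes are memcpy'd inline after the WR header or described by ULPTX SGL entries. A standalone model of the immediate-mode packing; all sizes here are assumed for illustration::

    #include <stdio.h>
    #include <string.h>

    #define IV 16

    /* Pack b0 | aad | iv | payload into one immediate buffer, in the
     * order the hardware expects (models the reqctx->imm branch). */
    static size_t pack_imm(unsigned char *out,
                           const unsigned char *b0, size_t b0_len,
                           const unsigned char *aad, size_t aad_len,
                           const unsigned char *iv,
                           const unsigned char *pl, size_t pl_len)
    {
        unsigned char *p = out;

        if (b0_len) {                 /* CCM's B0 + AAD-length field */
            memcpy(p, b0, b0_len);
            p += b0_len;
        }
        memcpy(p, aad, aad_len);  p += aad_len;
        memcpy(p, iv, IV);        p += IV;
        memcpy(p, pl, pl_len);    p += pl_len;
        return (size_t)(p - out);
    }

    int main(void)
    {
        unsigned char b0[18] = "B0", aad[8] = "AAD", iv[IV] = "IV";
        unsigned char pl[32] = "DATA", buf[128];
        size_t n = pack_imm(buf, b0, sizeof(b0), aad, sizeof(aad),
                            iv, pl, sizeof(pl));

        printf("%zu bytes packed\n", n);  /* 18 + 8 + 16 + 32 = 74 */
        return 0;
    }

In the non-immediate branch the same four pieces are added to the ULPTX walk in the same order, with B0 and IV contributed as single pre-mapped pages.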
2310 | |||
2311 | static inline void chcr_add_aead_dst_ent(struct aead_request *req, | ||
2312 | struct cpl_rx_phys_dsgl *phys_cpl, | ||
2313 | unsigned int assoclen, | ||
2314 | unsigned short op_type, | ||
2315 | unsigned short qid) | ||
2316 | { | ||
2317 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
2318 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
2319 | struct dsgl_walk dsgl_walk; | ||
2320 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
2321 | u32 temp; | ||
2322 | |||
2323 | dsgl_walk_init(&dsgl_walk, phys_cpl); | ||
2324 | if (reqctx->b0_dma) | ||
2325 | dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); | ||
2326 | dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); | ||
2327 | dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); | ||
2328 | temp = req->cryptlen + (op_type ? -authsize : authsize); | ||
2329 | dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); | ||
2330 | dsgl_walk_end(&dsgl_walk, qid); | ||
2331 | } | ||
2332 | |||
2333 | static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, | ||
2334 | struct ulptx_sgl *ulptx, | ||
2335 | struct cipher_wr_param *wrparam) | ||
2336 | { | ||
2337 | struct ulptx_walk ulp_walk; | ||
2338 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
2339 | |||
2340 | if (reqctx->imm) { | ||
2341 | u8 *buf = (u8 *)ulptx; | ||
2342 | |||
2343 | memcpy(buf, reqctx->iv, IV); | ||
2344 | buf += IV; | ||
2345 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | ||
2346 | buf, wrparam->bytes, reqctx->processed); | ||
2347 | } else { | ||
2348 | ulptx_walk_init(&ulp_walk, ulptx); | ||
2349 | ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); | ||
2350 | ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, | ||
2351 | reqctx->src_ofst); | ||
2352 | reqctx->srcsg = ulp_walk.last_sg; | ||
2353 | reqctx->src_ofst = ulp_walk.last_sg_len; | ||
2354 | ulptx_walk_end(&ulp_walk); | ||
2355 | } | ||
2356 | } | ||
2357 | |||
2358 | static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, | ||
2359 | struct cpl_rx_phys_dsgl *phys_cpl, | ||
2360 | struct cipher_wr_param *wrparam, | ||
2361 | unsigned short qid) | ||
2362 | { | ||
2363 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
2364 | struct dsgl_walk dsgl_walk; | ||
2365 | |||
2366 | dsgl_walk_init(&dsgl_walk, phys_cpl); | ||
2367 | dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); | ||
2368 | dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, | ||
2369 | reqctx->dst_ofst); | ||
2370 | reqctx->dstsg = dsgl_walk.last_sg; | ||
2371 | reqctx->dst_ofst = dsgl_walk.last_sg_len; | ||
2372 | |||
2373 | dsgl_walk_end(&dsgl_walk, qid); | ||
2374 | } | ||
2375 | |||
2376 | static inline void chcr_add_hash_src_ent(struct ahash_request *req, | ||
2377 | struct ulptx_sgl *ulptx, | ||
2378 | struct hash_wr_param *param) | ||
2379 | { | ||
2380 | struct ulptx_walk ulp_walk; | ||
2381 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | ||
2382 | |||
2383 | if (reqctx->imm) { | ||
2384 | u8 *buf = (u8 *)ulptx; | ||
2385 | |||
2386 | if (param->bfr_len) { | ||
2387 | memcpy(buf, reqctx->reqbfr, param->bfr_len); | ||
2388 | buf += param->bfr_len; | ||
2389 | } | ||
2390 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | ||
2391 | buf, param->sg_len, 0); | ||
2392 | } else { | ||
2393 | ulptx_walk_init(&ulp_walk, ulptx); | ||
2394 | if (param->bfr_len) | ||
2395 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, | ||
2396 | &reqctx->dma_addr); | ||
2397 | ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, | ||
2398 | 0); | ||
2399 | // reqctx->srcsg = ulp_walk.last_sg; | ||
2400 | // reqctx->src_ofst = ulp_walk.last_sg_len; | ||
2401 | ulptx_walk_end(&ulp_walk); | ||
2402 | } | ||
2403 | } | ||
2404 | |||
2405 | |||
2406 | static inline int chcr_hash_dma_map(struct device *dev, | ||
2407 | struct ahash_request *req) | ||
2408 | { | ||
2409 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
2410 | int error = 0; | ||
2411 | |||
2412 | if (!req->nbytes) | ||
2413 | return 0; | ||
2414 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | ||
2415 | DMA_TO_DEVICE); | ||
2416 | if (!error) | ||
2417 | return error; | ||
2418 | req_ctx->is_sg_map = 1; | ||
2419 | return 0; | ||
2420 | } | ||
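Review note for readers of chcr_hash_dma_map(): dma_map_sg() returns the number of mapped entries and 0 on failure, never a negative errno, so zero is the failure case a mapping helper has to normalize (the digest path above already treats any nonzero return from the helper as -ENOMEM). A minimal kernel-style sketch of that convention, as a hypothetical helper rather than the driver's::

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* dma_map_sg() yields a count (0 == failure), so convert explicitly
     * to an errno instead of passing the count back up the stack. */
    static int map_hash_src(struct device *dev, struct scatterlist *src)
    {
        if (!dma_map_sg(dev, src, sg_nents(src), DMA_TO_DEVICE))
            return -ENOMEM;   /* zero mapped entries: report failure */
        return 0;
    }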
2421 | |||
2422 | static inline void chcr_hash_dma_unmap(struct device *dev, | ||
2423 | struct ahash_request *req) | ||
2424 | { | ||
2425 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
2426 | |||
2427 | if (!req->nbytes) | ||
2428 | return; | ||
2429 | |||
2430 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2431 | DMA_TO_DEVICE); | ||
2432 | req_ctx->is_sg_map = 0; | ||
2433 | |||
2434 | } | ||
2435 | |||
2436 | |||
2437 | static int chcr_cipher_dma_map(struct device *dev, | ||
2438 | struct ablkcipher_request *req) | ||
2439 | { | ||
2440 | int error; | ||
2441 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
2442 | |||
2443 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, | ||
2444 | DMA_BIDIRECTIONAL); | ||
2445 | if (dma_mapping_error(dev, reqctx->iv_dma)) | ||
2446 | return -ENOMEM; | ||
2447 | |||
2448 | if (req->src == req->dst) { | ||
2449 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | ||
2450 | DMA_BIDIRECTIONAL); | ||
2451 | if (!error) | ||
2452 | goto err; | ||
2453 | } else { | ||
2454 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | ||
2455 | DMA_TO_DEVICE); | ||
2456 | if (!error) | ||
2457 | goto err; | ||
2458 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | ||
2459 | DMA_FROM_DEVICE); | ||
2460 | if (!error) { | ||
2461 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2462 | DMA_TO_DEVICE); | ||
2463 | goto err; | ||
2464 | } | ||
2465 | } | ||
2466 | |||
2467 | return 0; | ||
2468 | err: | ||
2469 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | ||
2470 | return -ENOMEM; | ||
2471 | } | ||
2472 | static void chcr_cipher_dma_unmap(struct device *dev, | ||
2473 | struct ablkcipher_request *req) | ||
2474 | { | ||
2475 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
2476 | |||
2477 | dma_unmap_single(dev, reqctx->iv_dma, IV, | ||
2478 | DMA_BIDIRECTIONAL); | ||
2479 | if (req->src == req->dst) { | ||
2480 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2481 | DMA_BIDIRECTIONAL); | ||
2482 | } else { | ||
2483 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | ||
2484 | DMA_TO_DEVICE); | ||
2485 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | ||
2486 | DMA_FROM_DEVICE); | ||
2487 | } | ||
2488 | } | ||
2489 | |||
2098 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) | 2490 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
2099 | { | 2491 | { |
2100 | __be32 data; | 2492 | __be32 data; |
@@ -2179,15 +2571,13 @@ static int ccm_format_packet(struct aead_request *req, | |||
2179 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | 2571 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, |
2180 | unsigned int dst_size, | 2572 | unsigned int dst_size, |
2181 | struct aead_request *req, | 2573 | struct aead_request *req, |
2182 | unsigned short op_type, | 2574 | unsigned short op_type) |
2183 | struct chcr_context *chcrctx) | ||
2184 | { | 2575 | { |
2185 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2576 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2186 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 2577 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2187 | unsigned int ivsize = AES_BLOCK_SIZE; | ||
2188 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; | 2578 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; |
2189 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; | 2579 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; |
2190 | unsigned int c_id = chcrctx->dev->rx_channel_id; | 2580 | unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; |
2191 | unsigned int ccm_xtra; | 2581 | unsigned int ccm_xtra; |
2192 | unsigned char tag_offset = 0, auth_offset = 0; | 2582 | unsigned char tag_offset = 0, auth_offset = 0; |
2193 | unsigned int assoclen; | 2583 | unsigned int assoclen; |
@@ -2200,7 +2590,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |||
2200 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); | 2590 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); |
2201 | 2591 | ||
2202 | auth_offset = req->cryptlen ? | 2592 | auth_offset = req->cryptlen ? |
2203 | (assoclen + ivsize + 1 + ccm_xtra) : 0; | 2593 | (assoclen + IV + 1 + ccm_xtra) : 0; |
2204 | if (op_type == CHCR_DECRYPT_OP) { | 2594 | if (op_type == CHCR_DECRYPT_OP) { |
2205 | if (crypto_aead_authsize(tfm) != req->cryptlen) | 2595 | if (crypto_aead_authsize(tfm) != req->cryptlen) |
2206 | tag_offset = crypto_aead_authsize(tfm); | 2596 | tag_offset = crypto_aead_authsize(tfm); |
@@ -2210,14 +2600,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |||
2210 | 2600 | ||
2211 | 2601 | ||
2212 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, | 2602 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, |
2213 | 2, (ivsize ? (assoclen + 1) : 0) + | 2603 | 2, assoclen + 1 + ccm_xtra); |
2214 | ccm_xtra); | ||
2215 | sec_cpl->pldlen = | 2604 | sec_cpl->pldlen = |
2216 | htonl(assoclen + ivsize + req->cryptlen + ccm_xtra); | 2605 | htonl(assoclen + IV + req->cryptlen + ccm_xtra); |
2217 | /* For CCM there will be b0 always. So AAD start will be 1 always */ | 2606 | /* For CCM there will be b0 always. So AAD start will be 1 always */ |
2218 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | 2607 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2219 | 1, assoclen + ccm_xtra, assoclen | 2608 | 1, assoclen + ccm_xtra, assoclen |
2220 | + ivsize + 1 + ccm_xtra, 0); | 2609 | + IV + 1 + ccm_xtra, 0); |
2221 | 2610 | ||
2222 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, | 2611 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, |
2223 | auth_offset, tag_offset, | 2612 | auth_offset, tag_offset, |
@@ -2226,10 +2615,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |||
2226 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | 2615 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
2227 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, | 2616 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, |
2228 | cipher_mode, mac_mode, | 2617 | cipher_mode, mac_mode, |
2229 | aeadctx->hmac_ctrl, ivsize >> 1); | 2618 | aeadctx->hmac_ctrl, IV >> 1); |
2230 | 2619 | ||
2231 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, | 2620 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, |
2232 | 1, dst_size); | 2621 | 0, dst_size); |
2233 | } | 2622 | } |
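Review note: in fill_sec_cpl_for_aead() every offset is displaced by ccm_xtra, the bytes CCM prepends to the AAD: the 16-byte B0 block always, plus a 2-byte encoded AAD-length field whenever there is AAD. A standalone restatement of the offset arithmetic; CCM_B0_SIZE and CCM_AAD_FIELD_SIZE take the CCM spec's common-case values::

    #include <stdio.h>

    #define IV                 16
    #define CCM_B0_SIZE        16   /* one AES block */
    #define CCM_AAD_FIELD_SIZE  2   /* encoded AAD length, assoclen < 2^16 - 2^8 */

    int main(void)
    {
        unsigned int assoclen = 24, cryptlen = 256;
        unsigned int ccm_xtra = CCM_B0_SIZE +
                                (assoclen ? CCM_AAD_FIELD_SIZE : 0);

        /* Offsets handed to the SEC CPL, all shifted by ccm_xtra: */
        unsigned int iv_insert    = assoclen + 1 + ccm_xtra;
        unsigned int cipher_start = assoclen + IV + 1 + ccm_xtra;
        unsigned int pldlen       = assoclen + IV + cryptlen + ccm_xtra;
        unsigned int auth_offset  = cryptlen ? cipher_start : 0;

        printf("xtra=%u ivins=%u cstart=%u pldlen=%u auth=%u\n",
               ccm_xtra, iv_insert, cipher_start, pldlen, auth_offset);
        return 0;
    }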
2234 | 2623 | ||
2235 | int aead_ccm_validate_input(unsigned short op_type, | 2624 | int aead_ccm_validate_input(unsigned short op_type, |
@@ -2249,131 +2638,83 @@ int aead_ccm_validate_input(unsigned short op_type, | |||
2249 | return -EINVAL; | 2638 | return -EINVAL; |
2250 | } | 2639 | } |
2251 | } | 2640 | } |
2252 | if (aeadctx->enckey_len == 0) { | ||
2253 | pr_err("CCM: Encryption key not set\n"); | ||
2254 | return -EINVAL; | ||
2255 | } | ||
2256 | return 0; | 2641 | return 0; |
2257 | } | 2642 | } |
2258 | 2643 | ||
2259 | unsigned int fill_aead_req_fields(struct sk_buff *skb, | ||
2260 | struct aead_request *req, | ||
2261 | struct scatterlist *src, | ||
2262 | unsigned int ivsize, | ||
2263 | struct chcr_aead_ctx *aeadctx) | ||
2264 | { | ||
2265 | unsigned int frags = 0; | ||
2266 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
2267 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
2268 | /* b0 and aad length(if available) */ | ||
2269 | |||
2270 | write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE + | ||
2271 | (req->assoclen ? CCM_AAD_FIELD_SIZE : 0)); | ||
2272 | if (req->assoclen) { | ||
2273 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | ||
2274 | write_sg_to_skb(skb, &frags, req->src, | ||
2275 | req->assoclen - 8); | ||
2276 | else | ||
2277 | write_sg_to_skb(skb, &frags, req->src, req->assoclen); | ||
2278 | } | ||
2279 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | ||
2280 | if (req->cryptlen) | ||
2281 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
2282 | |||
2283 | return frags; | ||
2284 | } | ||
2285 | |||
2286 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | 2644 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, |
2287 | unsigned short qid, | 2645 | unsigned short qid, |
2288 | int size, | 2646 | int size, |
2289 | unsigned short op_type) | 2647 | unsigned short op_type) |
2290 | { | 2648 | { |
2291 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2649 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2292 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2650 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2293 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
2294 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2295 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2651 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2296 | struct sk_buff *skb = NULL; | 2652 | struct sk_buff *skb = NULL; |
2297 | struct chcr_wr *chcr_req; | 2653 | struct chcr_wr *chcr_req; |
2298 | struct cpl_rx_phys_dsgl *phys_cpl; | 2654 | struct cpl_rx_phys_dsgl *phys_cpl; |
2299 | struct phys_sge_parm sg_param; | 2655 | struct ulptx_sgl *ulptx; |
2300 | struct scatterlist *src; | 2656 | unsigned int transhdr_len; |
2301 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; | 2657 | unsigned int dst_size = 0, kctx_len, dnents, temp; |
2302 | unsigned int dst_size = 0, kctx_len, nents; | 2658 | unsigned int sub_type, assoclen = req->assoclen; |
2303 | unsigned int sub_type; | ||
2304 | unsigned int authsize = crypto_aead_authsize(tfm); | 2659 | unsigned int authsize = crypto_aead_authsize(tfm); |
2305 | int error = -EINVAL, src_nent; | 2660 | int error = -EINVAL; |
2306 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 2661 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
2307 | GFP_ATOMIC; | 2662 | GFP_ATOMIC; |
2308 | struct adapter *adap = padap(ctx->dev); | 2663 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2309 | 2664 | ||
2310 | dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : | 2665 | reqctx->b0_dma = 0; |
2666 | sub_type = get_aead_subtype(tfm); | ||
2667 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | ||
2668 | assoclen -= 8; | ||
2669 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : | ||
2311 | authsize); | 2670 | authsize); |
2312 | reqctx->newdstsg = NULL; | 2671 | error = chcr_aead_common_init(req, op_type); |
2313 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | 2672 | if (error) |
2314 | goto err; | 2673 | return ERR_PTR(error); |
2315 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); | ||
2316 | if (src_nent < 0) | ||
2317 | goto err; | ||
2318 | 2674 | ||
2319 | sub_type = get_aead_subtype(tfm); | 2675 | |
2320 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); | 2676 | reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); |
2321 | if (req->src != req->dst) { | ||
2322 | error = chcr_copy_assoc(req, aeadctx); | ||
2323 | if (error) { | ||
2324 | pr_err("AAD copy to destination buffer fails\n"); | ||
2325 | return ERR_PTR(error); | ||
2326 | } | ||
2327 | } | ||
2328 | if (dst_size && is_newsg(req->dst, &nents)) { | ||
2329 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
2330 | if (IS_ERR(reqctx->newdstsg)) | ||
2331 | return ERR_CAST(reqctx->newdstsg); | ||
2332 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2333 | reqctx->newdstsg, req->assoclen); | ||
2334 | } else { | ||
2335 | if (req->src == req->dst) | ||
2336 | reqctx->dst = src; | ||
2337 | else | ||
2338 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2339 | req->dst, req->assoclen); | ||
2340 | } | ||
2341 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + | ||
2342 | (op_type ? -authsize : authsize)); | ||
2343 | if (reqctx->dst_nents < 0) { | ||
2344 | pr_err("CCM:Invalid Destination sg entries\n"); | ||
2345 | error = -EINVAL; | ||
2346 | goto err; | ||
2347 | } | ||
2348 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); | 2677 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); |
2349 | if (error) | 2678 | if (error) |
2350 | goto err; | 2679 | goto err; |
2351 | 2680 | if (dst_size) { | |
2352 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | 2681 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2682 | dnents += sg_nents_xlen(req->dst, req->cryptlen | ||
2683 | + (op_type ? -authsize : authsize), | ||
2684 | CHCR_DST_SG_SIZE, req->assoclen); | ||
2685 | dnents += MIN_CCM_SG; // For IV and B0 | ||
2686 | } else { | ||
2687 | dnents = 0; | ||
2688 | } | ||
2689 | dst_size = get_space_for_phys_dsgl(dnents); | ||
2353 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; | 2690 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; |
2354 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | 2691 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2355 | if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG, | 2692 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + |
2356 | T6_MAX_AAD_SIZE - 18, | 2693 | reqctx->b0_len) <= SGE_MAX_WR_LEN; |
2357 | transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), | 2694 | temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen + |
2358 | op_type)) { | 2695 | reqctx->b0_len), 16) * 16) : |
2696 | (sgl_len(reqctx->src_nents + reqctx->aad_nents + | ||
2697 | MIN_CCM_SG) * 8); | ||
2698 | transhdr_len += temp; | ||
2699 | transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; | ||
2700 | |||
2701 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - | ||
2702 | reqctx->b0_len, transhdr_len, op_type)) { | ||
2359 | atomic_inc(&adap->chcr_stats.fallback); | 2703 | atomic_inc(&adap->chcr_stats.fallback); |
2360 | free_new_sg(reqctx->newdstsg); | 2704 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2361 | reqctx->newdstsg = NULL; | 2705 | op_type); |
2362 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2706 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2363 | } | 2707 | } |
2364 | 2708 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); | |
2365 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | ||
2366 | 2709 | ||
2367 | if (!skb) { | 2710 | if (!skb) { |
2368 | error = -ENOMEM; | 2711 | error = -ENOMEM; |
2369 | goto err; | 2712 | goto err; |
2370 | } | 2713 | } |
2371 | 2714 | ||
2372 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 2715 | chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); |
2373 | |||
2374 | chcr_req = __skb_put_zero(skb, transhdr_len); | ||
2375 | 2716 | ||
2376 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx); | 2717 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type); |
2377 | 2718 | ||
2378 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | 2719 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2379 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | 2720 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); |
@@ -2381,31 +2722,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
2381 | 16), aeadctx->key, aeadctx->enckey_len); | 2722 | 16), aeadctx->key, aeadctx->enckey_len); |
2382 | 2723 | ||
2383 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 2724 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2725 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); | ||
2384 | error = ccm_format_packet(req, aeadctx, sub_type, op_type); | 2726 | error = ccm_format_packet(req, aeadctx, sub_type, op_type); |
2385 | if (error) | 2727 | if (error) |
2386 | goto dstmap_fail; | 2728 | goto dstmap_fail; |
2387 | 2729 | ||
2388 | sg_param.nents = reqctx->dst_nents; | 2730 | reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, |
2389 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 2731 | &reqctx->scratch_pad, reqctx->b0_len, |
2390 | sg_param.qid = qid; | 2732 | DMA_BIDIRECTIONAL); |
2391 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, | 2733 | if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, |
2392 | reqctx->dst, &sg_param); | 2734 | reqctx->b0_dma)) { |
2393 | if (error) | 2735 | error = -ENOMEM; |
2394 | goto dstmap_fail; | 2736 | goto dstmap_fail; |
2737 | } | ||
2738 | |||
2739 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); | ||
2740 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); | ||
2395 | 2741 | ||
2396 | skb_set_transport_header(skb, transhdr_len); | ||
2397 | frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); | ||
2398 | atomic_inc(&adap->chcr_stats.aead_rqst); | 2742 | atomic_inc(&adap->chcr_stats.aead_rqst); |
2399 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1, | 2743 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2400 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); | 2744 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen + |
2745 | reqctx->b0_len) : 0); | ||
2746 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, | ||
2747 | transhdr_len, temp, 0); | ||
2401 | reqctx->skb = skb; | 2748 | reqctx->skb = skb; |
2402 | skb_get(skb); | 2749 | reqctx->op = op_type; |
2750 | |||
2403 | return skb; | 2751 | return skb; |
2404 | dstmap_fail: | 2752 | dstmap_fail: |
2405 | kfree_skb(skb); | 2753 | kfree_skb(skb); |
2406 | err: | 2754 | err: |
2407 | free_new_sg(reqctx->newdstsg); | 2755 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); |
2408 | reqctx->newdstsg = NULL; | ||
2409 | return ERR_PTR(error); | 2756 | return ERR_PTR(error); |
2410 | } | 2757 | } |
2411 | 2758 | ||
@@ -2415,115 +2762,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
2415 | unsigned short op_type) | 2762 | unsigned short op_type) |
2416 | { | 2763 | { |
2417 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2764 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2418 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2765 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2419 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
2420 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2421 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2766 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2422 | struct sk_buff *skb = NULL; | 2767 | struct sk_buff *skb = NULL; |
2423 | struct chcr_wr *chcr_req; | 2768 | struct chcr_wr *chcr_req; |
2424 | struct cpl_rx_phys_dsgl *phys_cpl; | 2769 | struct cpl_rx_phys_dsgl *phys_cpl; |
2425 | struct phys_sge_parm sg_param; | 2770 | struct ulptx_sgl *ulptx; |
2426 | struct scatterlist *src; | 2771 | unsigned int transhdr_len, dnents = 0; |
2427 | unsigned int frags = 0, transhdr_len; | 2772 | unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; |
2428 | unsigned int ivsize = AES_BLOCK_SIZE; | ||
2429 | unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen; | ||
2430 | unsigned char tag_offset = 0; | ||
2431 | unsigned int authsize = crypto_aead_authsize(tfm); | 2773 | unsigned int authsize = crypto_aead_authsize(tfm); |
2432 | int error = -EINVAL, src_nent; | 2774 | int error = -EINVAL; |
2433 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 2775 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
2434 | GFP_ATOMIC; | 2776 | GFP_ATOMIC; |
2435 | struct adapter *adap = padap(ctx->dev); | 2777 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2436 | |||
2437 | reqctx->newdstsg = NULL; | ||
2438 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : | ||
2439 | authsize); | ||
2440 | /* validate key size */ | ||
2441 | if (aeadctx->enckey_len == 0) | ||
2442 | goto err; | ||
2443 | 2778 | ||
2444 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | 2779 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) |
2445 | goto err; | 2780 | assoclen = req->assoclen - 8; |
2446 | src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen); | ||
2447 | if (src_nent < 0) | ||
2448 | goto err; | ||
2449 | 2781 | ||
2450 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); | 2782 | reqctx->b0_dma = 0; |
2451 | if (req->src != req->dst) { | 2783 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize); |
2452 | error = chcr_copy_assoc(req, aeadctx); | 2784 | error = chcr_aead_common_init(req, op_type); |
2453 | if (error) | 2785 | if (error) |
2454 | return ERR_PTR(error); | 2786 | return ERR_PTR(error); |
2455 | } | 2787 | if (dst_size) { |
2456 | 2788 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | |
2457 | if (dst_size && is_newsg(req->dst, &nents)) { | 2789 | dnents += sg_nents_xlen(req->dst, |
2458 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | 2790 | req->cryptlen + (op_type ? -authsize : authsize), |
2459 | if (IS_ERR(reqctx->newdstsg)) | 2791 | CHCR_DST_SG_SIZE, req->assoclen); |
2460 | return ERR_CAST(reqctx->newdstsg); | 2792 | dnents += MIN_GCM_SG; // For IV |
2461 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2462 | reqctx->newdstsg, assoclen); | ||
2463 | } else { | 2793 | } else { |
2464 | if (req->src == req->dst) | 2794 | dnents = 0; |
2465 | reqctx->dst = src; | ||
2466 | else | ||
2467 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2468 | req->dst, assoclen); | ||
2469 | } | ||
2470 | |||
2471 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + | ||
2472 | (op_type ? -authsize : authsize)); | ||
2473 | if (reqctx->dst_nents < 0) { | ||
2474 | pr_err("GCM:Invalid Destination sg entries\n"); | ||
2475 | error = -EINVAL; | ||
2476 | goto err; | ||
2477 | } | 2795 | } |
2478 | 2796 | dst_size = get_space_for_phys_dsgl(dnents); | |
2479 | |||
2480 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | ||
2481 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + | 2797 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + |
2482 | AEAD_H_SIZE; | 2798 | AEAD_H_SIZE; |
2483 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | 2799 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2484 | if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG, | 2800 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= |
2485 | T6_MAX_AAD_SIZE, | 2801 | SGE_MAX_WR_LEN; |
2486 | transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), | 2802 | temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + |
2487 | op_type)) { | 2803 | req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents + |
2804 | reqctx->aad_nents + MIN_GCM_SG) * 8); | ||
2805 | transhdr_len += temp; | ||
2806 | transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; | ||
2807 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, | ||
2808 | transhdr_len, op_type)) { | ||
2488 | atomic_inc(&adap->chcr_stats.fallback); | 2809 | atomic_inc(&adap->chcr_stats.fallback); |
2489 | free_new_sg(reqctx->newdstsg); | 2810 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2490 | reqctx->newdstsg = NULL; | 2811 | op_type); |
2491 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2812 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2492 | } | 2813 | } |
2493 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 2814 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
2494 | if (!skb) { | 2815 | if (!skb) { |
2495 | error = -ENOMEM; | 2816 | error = -ENOMEM; |
2496 | goto err; | 2817 | goto err; |
2497 | } | 2818 | } |
2498 | 2819 | ||
2499 | /* NIC driver is going to write the sge hdr. */ | ||
2500 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
2501 | |||
2502 | chcr_req = __skb_put_zero(skb, transhdr_len); | 2820 | chcr_req = __skb_put_zero(skb, transhdr_len); |
2503 | 2821 | ||
2504 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) | 2822 | // Offset of tag from end |
2505 | assoclen = req->assoclen - 8; | 2823 | temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; |
2506 | |||
2507 | tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | ||
2508 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( | 2824 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( |
2509 | ctx->dev->rx_channel_id, 2, (ivsize ? | 2825 | a_ctx(tfm)->dev->rx_channel_id, 2, |
2510 | (assoclen + 1) : 0)); | 2826 | (assoclen + 1)); |
2511 | chcr_req->sec_cpl.pldlen = | 2827 | chcr_req->sec_cpl.pldlen = |
2512 | htonl(assoclen + ivsize + req->cryptlen); | 2828 | htonl(assoclen + IV + req->cryptlen); |
2513 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | 2829 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2514 | assoclen ? 1 : 0, assoclen, | 2830 | assoclen ? 1 : 0, assoclen, |
2515 | assoclen + ivsize + 1, 0); | 2831 | assoclen + IV + 1, 0); |
2516 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | 2832 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
2517 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1, | 2833 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, |
2518 | tag_offset, tag_offset); | 2834 | temp, temp); |
2519 | chcr_req->sec_cpl.seqno_numivs = | 2835 | chcr_req->sec_cpl.seqno_numivs = |
2520 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == | 2836 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == |
2521 | CHCR_ENCRYPT_OP) ? 1 : 0, | 2837 | CHCR_ENCRYPT_OP) ? 1 : 0, |
2522 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | 2838 | CHCR_SCMD_CIPHER_MODE_AES_GCM, |
2523 | CHCR_SCMD_AUTH_MODE_GHASH, | 2839 | CHCR_SCMD_AUTH_MODE_GHASH, |
2524 | aeadctx->hmac_ctrl, ivsize >> 1); | 2840 | aeadctx->hmac_ctrl, IV >> 1); |
2525 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | 2841 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2526 | 0, 1, dst_size); | 2842 | 0, 0, dst_size); |
2527 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | 2843 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2528 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | 2844 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); |
2529 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * | 2845 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * |
@@ -2534,39 +2850,28 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
2534 | if (get_aead_subtype(tfm) == | 2850 | if (get_aead_subtype(tfm) == |
2535 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | 2851 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { |
2536 | memcpy(reqctx->iv, aeadctx->salt, 4); | 2852 | memcpy(reqctx->iv, aeadctx->salt, 4); |
2537 | memcpy(reqctx->iv + 4, req->iv, 8); | 2853 | memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE); |
2538 | } else { | 2854 | } else { |
2539 | memcpy(reqctx->iv, req->iv, 12); | 2855 | memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE); |
2540 | } | 2856 | } |
2541 | *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); | 2857 | *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); |
2542 | 2858 | ||
2543 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 2859 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2544 | sg_param.nents = reqctx->dst_nents; | 2860 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2545 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | ||
2546 | sg_param.qid = qid; | ||
2547 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, | ||
2548 | reqctx->dst, &sg_param); | ||
2549 | if (error) | ||
2550 | goto dstmap_fail; | ||
2551 | 2861 | ||
2552 | skb_set_transport_header(skb, transhdr_len); | 2862 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); |
2553 | write_sg_to_skb(skb, &frags, req->src, assoclen); | 2863 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); |
2554 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | ||
2555 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
2556 | atomic_inc(&adap->chcr_stats.aead_rqst); | 2864 | atomic_inc(&adap->chcr_stats.aead_rqst); |
2557 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, | 2865 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2558 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, | 2866 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); |
2559 | reqctx->verify); | 2867 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, |
2868 | transhdr_len, temp, reqctx->verify); | ||
2560 | reqctx->skb = skb; | 2869 | reqctx->skb = skb; |
2561 | skb_get(skb); | 2870 | reqctx->op = op_type; |
2562 | return skb; | 2871 | return skb; |
2563 | 2872 | ||
2564 | dstmap_fail: | ||
2565 | /* ivmap_fail: */ | ||
2566 | kfree_skb(skb); | ||
2567 | err: | 2873 | err: |
2568 | free_new_sg(reqctx->newdstsg); | 2874 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); |
2569 | reqctx->newdstsg = NULL; | ||
2570 | return ERR_PTR(error); | 2875 | return ERR_PTR(error); |
2571 | } | 2876 | } |
2572 | 2877 | ||
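The 16-byte block assembled in reqctx->iv above is the standard AES-GCM pre-counter block: a 12-byte nonce followed by a 32-bit big-endian counter initialized to 1. For RFC 4106 the nonce is the 4-byte key salt plus the 8-byte per-request IV; for plain GCM the caller supplies all 12 nonce bytes. A minimal sketch of the same layout (function and parameter names are illustrative, not the driver's):

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/string.h>

    /* Build the initial GCM counter block: nonce (12 bytes) || ctr = 1 */
    static void gcm_build_counter0(u8 block[16], const u8 *salt,
                                   const u8 *iv, bool rfc4106)
    {
            if (rfc4106) {
                    memcpy(block, salt, 4);           /* key salt */
                    memcpy(block + 4, iv, 8);         /* per-request IV */
            } else {
                    memcpy(block, iv, 12);            /* full nonce */
            }
            *(__be32 *)(block + 12) = cpu_to_be32(1); /* counter = 1 */
    }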
@@ -2574,8 +2879,7 @@ err: | |||
2574 | 2879 | ||
2575 | static int chcr_aead_cra_init(struct crypto_aead *tfm) | 2880 | static int chcr_aead_cra_init(struct crypto_aead *tfm) |
2576 | { | 2881 | { |
2577 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2882 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2578 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2579 | struct aead_alg *alg = crypto_aead_alg(tfm); | 2883 | struct aead_alg *alg = crypto_aead_alg(tfm); |
2580 | 2884 | ||
2581 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, | 2885 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, |
@@ -2586,25 +2890,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm) | |||
2586 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), | 2890 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), |
2587 | sizeof(struct aead_request) + | 2891 | sizeof(struct aead_request) + |
2588 | crypto_aead_reqsize(aeadctx->sw_cipher))); | 2892 | crypto_aead_reqsize(aeadctx->sw_cipher))); |
2589 | aeadctx->null = crypto_get_default_null_skcipher(); | 2893 | return chcr_device_init(a_ctx(tfm)); |
2590 | if (IS_ERR(aeadctx->null)) | ||
2591 | return PTR_ERR(aeadctx->null); | ||
2592 | return chcr_device_init(ctx); | ||
2593 | } | 2894 | } |
2594 | 2895 | ||
2595 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) | 2896 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) |
2596 | { | 2897 | { |
2597 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | 2898 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2598 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2599 | 2899 | ||
2600 | crypto_put_default_null_skcipher(); | ||
2601 | crypto_free_aead(aeadctx->sw_cipher); | 2900 | crypto_free_aead(aeadctx->sw_cipher); |
2602 | } | 2901 | } |
2603 | 2902 | ||
2604 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, | 2903 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, |
2605 | unsigned int authsize) | 2904 | unsigned int authsize) |
2606 | { | 2905 | { |
2607 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 2906 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2608 | 2907 | ||
2609 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; | 2908 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; |
2610 | aeadctx->mayverify = VERIFY_HW; | 2909 | aeadctx->mayverify = VERIFY_HW; |
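Earlier in this hunk, chcr_aead_cra_init()/chcr_aead_cra_exit() also drop the shared null skcipher: the old code pinned the common "ecb(cipher_null)" transform, which drivers typically use as a crypto-API memcpy (here, presumably, for moving associated data between scatterlists). With the new DMA-mapped path the hardware reads the source scatterlist directly, so the reference and its put() can go. The helper pair being removed, for reference:

    #include <crypto/null.h>

    struct crypto_skcipher *null;

    null = crypto_get_default_null_skcipher();  /* shared, refcounted tfm */
    if (IS_ERR(null))
            return PTR_ERR(null);
    /* ... use the null cipher to copy data through the crypto API ... */
    crypto_put_default_null_skcipher();         /* drop the shared reference */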
@@ -2613,7 +2912,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, | |||
2613 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, | 2912 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, |
2614 | unsigned int authsize) | 2913 | unsigned int authsize) |
2615 | { | 2914 | { |
2616 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 2915 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2617 | u32 maxauth = crypto_aead_maxauthsize(tfm); | 2916 | u32 maxauth = crypto_aead_maxauthsize(tfm); |
2618 | 2917 | ||
2619 | /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not | 2918 | /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not |
@@ -2651,7 +2950,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm, | |||
2651 | 2950 | ||
2652 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | 2951 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) |
2653 | { | 2952 | { |
2654 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 2953 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2655 | 2954 | ||
2656 | switch (authsize) { | 2955 | switch (authsize) { |
2657 | case ICV_4: | 2956 | case ICV_4: |
@@ -2691,7 +2990,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | |||
2691 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, | 2990 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, |
2692 | unsigned int authsize) | 2991 | unsigned int authsize) |
2693 | { | 2992 | { |
2694 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 2993 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2695 | 2994 | ||
2696 | switch (authsize) { | 2995 | switch (authsize) { |
2697 | case ICV_8: | 2996 | case ICV_8: |
@@ -2717,7 +3016,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, | |||
2717 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, | 3016 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, |
2718 | unsigned int authsize) | 3017 | unsigned int authsize) |
2719 | { | 3018 | { |
2720 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 3019 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2721 | 3020 | ||
2722 | switch (authsize) { | 3021 | switch (authsize) { |
2723 | case ICV_4: | 3022 | case ICV_4: |
@@ -2760,8 +3059,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, | |||
2760 | const u8 *key, | 3059 | const u8 *key, |
2761 | unsigned int keylen) | 3060 | unsigned int keylen) |
2762 | { | 3061 | { |
2763 | struct chcr_context *ctx = crypto_aead_ctx(aead); | 3062 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2764 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2765 | unsigned char ck_size, mk_size; | 3063 | unsigned char ck_size, mk_size; |
2766 | int key_ctx_size = 0; | 3064 | int key_ctx_size = 0; |
2767 | 3065 | ||
@@ -2794,8 +3092,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, | |||
2794 | const u8 *key, | 3092 | const u8 *key, |
2795 | unsigned int keylen) | 3093 | unsigned int keylen) |
2796 | { | 3094 | { |
2797 | struct chcr_context *ctx = crypto_aead_ctx(aead); | 3095 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2798 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2799 | int error; | 3096 | int error; |
2800 | 3097 | ||
2801 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | 3098 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
@@ -2813,8 +3110,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, | |||
2813 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, | 3110 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, |
2814 | unsigned int keylen) | 3111 | unsigned int keylen) |
2815 | { | 3112 | { |
2816 | struct chcr_context *ctx = crypto_aead_ctx(aead); | 3113 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2817 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2818 | int error; | 3114 | int error; |
2819 | 3115 | ||
2820 | if (keylen < 3) { | 3116 | if (keylen < 3) { |
@@ -2840,8 +3136,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, | |||
2840 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | 3136 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, |
2841 | unsigned int keylen) | 3137 | unsigned int keylen) |
2842 | { | 3138 | { |
2843 | struct chcr_context *ctx = crypto_aead_ctx(aead); | 3139 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
2844 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2845 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); | 3140 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); |
2846 | struct crypto_cipher *cipher; | 3141 | struct crypto_cipher *cipher; |
2847 | unsigned int ck_size; | 3142 | unsigned int ck_size; |
@@ -2913,8 +3208,7 @@ out: | |||
2913 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 3208 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
2914 | unsigned int keylen) | 3209 | unsigned int keylen) |
2915 | { | 3210 | { |
2916 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | 3211 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); |
2917 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
2918 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | 3212 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
2919 | /* it contains auth and cipher key both*/ | 3213 | /* it contains auth and cipher key both*/ |
2920 | struct crypto_authenc_keys keys; | 3214 | struct crypto_authenc_keys keys; |
@@ -3034,8 +3328,7 @@ out: | |||
3034 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, | 3328 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, |
3035 | const u8 *key, unsigned int keylen) | 3329 | const u8 *key, unsigned int keylen) |
3036 | { | 3330 | { |
3037 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | 3331 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); |
3038 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
3039 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | 3332 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
3040 | struct crypto_authenc_keys keys; | 3333 | struct crypto_authenc_keys keys; |
3041 | int err; | 3334 | int err; |
@@ -3107,7 +3400,7 @@ static int chcr_aead_encrypt(struct aead_request *req) | |||
3107 | static int chcr_aead_decrypt(struct aead_request *req) | 3400 | static int chcr_aead_decrypt(struct aead_request *req) |
3108 | { | 3401 | { |
3109 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 3402 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3110 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | 3403 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
3111 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 3404 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
3112 | int size; | 3405 | int size; |
3113 | 3406 | ||
@@ -3140,30 +3433,29 @@ static int chcr_aead_op(struct aead_request *req, | |||
3140 | create_wr_t create_wr_fn) | 3433 | create_wr_t create_wr_fn) |
3141 | { | 3434 | { |
3142 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 3435 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3143 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
3144 | struct uld_ctx *u_ctx; | 3436 | struct uld_ctx *u_ctx; |
3145 | struct sk_buff *skb; | 3437 | struct sk_buff *skb; |
3146 | 3438 | ||
3147 | if (!ctx->dev) { | 3439 | if (!a_ctx(tfm)->dev) { |
3148 | pr_err("chcr : %s : No crypto device.\n", __func__); | 3440 | pr_err("chcr : %s : No crypto device.\n", __func__); |
3149 | return -ENXIO; | 3441 | return -ENXIO; |
3150 | } | 3442 | } |
3151 | u_ctx = ULD_CTX(ctx); | 3443 | u_ctx = ULD_CTX(a_ctx(tfm)); |
3152 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 3444 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
3153 | ctx->tx_qidx)) { | 3445 | a_ctx(tfm)->tx_qidx)) { |
3154 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 3446 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
3155 | return -EBUSY; | 3447 | return -EBUSY; |
3156 | } | 3448 | } |
3157 | 3449 | ||
3158 | /* Form a WR from req */ | 3450 | /* Form a WR from req */ |
3159 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size, | 3451 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, |
3160 | op_type); | 3452 | op_type); |
3161 | 3453 | ||
3162 | if (IS_ERR(skb) || !skb) | 3454 | if (IS_ERR(skb) || !skb) |
3163 | return PTR_ERR(skb); | 3455 | return PTR_ERR(skb); |
3164 | 3456 | ||
3165 | skb->dev = u_ctx->lldi.ports[0]; | 3457 | skb->dev = u_ctx->lldi.ports[0]; |
3166 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 3458 | set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); |
3167 | chcr_send_wr(skb); | 3459 | chcr_send_wr(skb); |
3168 | return -EINPROGRESS; | 3460 | return -EINPROGRESS; |
3169 | } | 3461 | } |
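chcr_aead_op() above implements the driver-side half of the backlog convention: a full queue yields -EBUSY unless the caller set CRYPTO_TFM_REQ_MAY_BACKLOG, and an accepted request always completes asynchronously via -EINPROGRESS. A sketch of the matching caller-side pattern, assuming the crypto_wait helpers from this development cycle:

    #include <linux/crypto.h>
    #include <crypto/aead.h>

    static int do_one_aead_encrypt(struct aead_request *req)
    {
            DECLARE_CRYPTO_WAIT(wait);

            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
            /* crypto_wait_req() converts -EINPROGRESS/-EBUSY into a sleep
             * on the completion and returns the request's final status. */
            return crypto_wait_req(crypto_aead_encrypt(req), &wait);
    }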
@@ -3385,7 +3677,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3385 | sizeof(struct chcr_aead_ctx) + | 3677 | sizeof(struct chcr_aead_ctx) + |
3386 | sizeof(struct chcr_gcm_ctx), | 3678 | sizeof(struct chcr_gcm_ctx), |
3387 | }, | 3679 | }, |
3388 | .ivsize = 12, | 3680 | .ivsize = GCM_AES_IV_SIZE, |
3389 | .maxauthsize = GHASH_DIGEST_SIZE, | 3681 | .maxauthsize = GHASH_DIGEST_SIZE, |
3390 | .setkey = chcr_gcm_setkey, | 3682 | .setkey = chcr_gcm_setkey, |
3391 | .setauthsize = chcr_gcm_setauthsize, | 3683 | .setauthsize = chcr_gcm_setauthsize, |
@@ -3405,7 +3697,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3405 | sizeof(struct chcr_gcm_ctx), | 3697 | sizeof(struct chcr_gcm_ctx), |
3406 | 3698 | ||
3407 | }, | 3699 | }, |
3408 | .ivsize = 8, | 3700 | .ivsize = GCM_RFC4106_IV_SIZE, |
3409 | .maxauthsize = GHASH_DIGEST_SIZE, | 3701 | .maxauthsize = GHASH_DIGEST_SIZE, |
3410 | .setkey = chcr_gcm_setkey, | 3702 | .setkey = chcr_gcm_setkey, |
3411 | .setauthsize = chcr_4106_4309_setauthsize, | 3703 | .setauthsize = chcr_4106_4309_setauthsize, |
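The bare 12 and 8 previously used for .ivsize are replaced by named constants; as of this cycle they are defined in include/crypto/gcm.h:

    #define GCM_AES_IV_SIZE         12
    #define GCM_RFC4106_IV_SIZE     8
    #define GCM_RFC4543_IV_SIZE     8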
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index 583008de51a3..96c9335ee728 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h | |||
@@ -176,21 +176,21 @@ | |||
176 | KEY_CONTEXT_SALT_PRESENT_V(1) | \ | 176 | KEY_CONTEXT_SALT_PRESENT_V(1) | \ |
177 | KEY_CONTEXT_CTX_LEN_V((ctx_len))) | 177 | KEY_CONTEXT_CTX_LEN_V((ctx_len))) |
178 | 178 | ||
179 | #define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \ | 179 | #define FILL_WR_OP_CCTX_SIZE \ |
180 | htonl( \ | 180 | htonl( \ |
181 | FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \ | 181 | FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \ |
182 | FW_CRYPTO_LOOKASIDE_WR) | \ | 182 | FW_CRYPTO_LOOKASIDE_WR) | \ |
183 | FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \ | 183 | FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \ |
184 | FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \ | 184 | FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \ |
185 | FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ | 185 | FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \ |
186 | FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) | 186 | FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0)) |
187 | 187 | ||
188 | #define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \ | 188 | #define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \ |
189 | htonl( \ | 189 | htonl( \ |
190 | FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ | 190 | FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ |
191 | FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ | 191 | FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ |
192 | FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \ | 192 | FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \ |
193 | FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \ | 193 | FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \ |
194 | FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid)) | 194 | FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid)) |
195 | 195 | ||
196 | #define FILL_ULPTX_CMD_DEST(cid, qid) \ | 196 | #define FILL_ULPTX_CMD_DEST(cid, qid) \ |
@@ -214,27 +214,22 @@ | |||
214 | calc_tx_flits_ofld(skb) * 8), 16))) | 214 | calc_tx_flits_ofld(skb) * 8), 16))) |
215 | 215 | ||
216 | #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ | 216 | #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ |
217 | ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1)) | 217 | ULP_TX_SC_MORE_V((immdatalen))) |
218 | |||
219 | #define MAX_NK 8 | 218 | #define MAX_NK 8 |
220 | #define CRYPTO_MAX_IMM_TX_PKT_LEN 256 | ||
221 | #define MAX_WR_SIZE 512 | ||
222 | #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) | 219 | #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) |
223 | #define MAX_DSGL_ENT 32 | 220 | #define MAX_DSGL_ENT 32 |
224 | #define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2) | ||
225 | #define MIN_CIPHER_SG 1 /* IV */ | 221 | #define MIN_CIPHER_SG 1 /* IV */ |
226 | #define MIN_AUTH_SG 2 /*IV + AAD*/ | 222 | #define MIN_AUTH_SG 1 /* IV */ |
227 | #define MIN_GCM_SG 2 /* IV + AAD*/ | 223 | #define MIN_GCM_SG 1 /* IV */ |
228 | #define MIN_DIGEST_SG 1 /*Partial Buffer*/ | 224 | #define MIN_DIGEST_SG 1 /*Partial Buffer*/ |
229 | #define MIN_CCM_SG 3 /*IV+AAD+B0*/ | 225 | #define MIN_CCM_SG 2 /*IV+B0*/ |
230 | #define SPACE_LEFT(len) \ | 226 | #define SPACE_LEFT(len) \ |
231 | ((MAX_WR_SIZE - WR_MIN_LEN - (len))) | 227 | ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) |
232 | 228 | ||
233 | unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, | 229 | unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88, |
234 | 48, 64, 72, 88, | 230 | 96, 112, 120, 136, 144, 160, 168, 184, |
235 | 96, 112, 120, 136, | 231 | 192, 208, 216, 232, 240, 256, 264, 280, |
236 | 144, 160, 168, 184, | 232 | 288, 304, 312, 328, 336, 352, 360, 376}; |
237 | 192}; | ||
238 | unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, | 233 | unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, |
239 | 112, 112, 128, 128, 144, 144, 160, 160, | 234 | 112, 112, 128, 128, 144, 144, 160, 160, |
240 | 192, 192, 208, 208, 224, 224, 240, 240, | 235 | 192, 192, 208, 208, 224, 224, 240, 240, |
@@ -258,10 +253,8 @@ struct hash_wr_param { | |||
258 | 253 | ||
259 | struct cipher_wr_param { | 254 | struct cipher_wr_param { |
260 | struct ablkcipher_request *req; | 255 | struct ablkcipher_request *req; |
261 | struct scatterlist *srcsg; | ||
262 | char *iv; | 256 | char *iv; |
263 | int bytes; | 257 | int bytes; |
264 | short int snent; | ||
265 | unsigned short qid; | 258 | unsigned short qid; |
266 | }; | 259 | }; |
267 | enum { | 260 | enum { |
@@ -299,31 +292,11 @@ enum { | |||
299 | ICV_16 = 16 | 292 | ICV_16 = 16 |
300 | }; | 293 | }; |
301 | 294 | ||
302 | struct hash_op_params { | ||
303 | unsigned char mk_size; | ||
304 | unsigned char pad_align; | ||
305 | unsigned char auth_mode; | ||
306 | char hash_name[MAX_HASH_NAME]; | ||
307 | unsigned short block_size; | ||
308 | unsigned short word_size; | ||
309 | unsigned short ipad_size; | ||
310 | }; | ||
311 | |||
312 | struct phys_sge_pairs { | 295 | struct phys_sge_pairs { |
313 | __be16 len[8]; | 296 | __be16 len[8]; |
314 | __be64 addr[8]; | 297 | __be64 addr[8]; |
315 | }; | 298 | }; |
316 | 299 | ||
317 | struct phys_sge_parm { | ||
318 | unsigned int nents; | ||
319 | unsigned int obsize; | ||
320 | unsigned short qid; | ||
321 | }; | ||
322 | |||
323 | struct crypto_result { | ||
324 | struct completion completion; | ||
325 | int err; | ||
326 | }; | ||
327 | 300 | ||
328 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | 301 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { |
329 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | 302 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, |
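The reworked limits above control how a request is packed into a work request: a payload no larger than SPACE_LEFT() (now bounded by the SGE's SGE_MAX_WR_LEN rather than the private 512-byte MAX_WR_SIZE) can be carried as immediate data, while anything larger is referenced through a ULPTX SGL whose wire size comes from the extended sgl_ent_len[] table (now covering up to 32 entries). A sketch of that decision, assuming the macros above:

    /* Illustrative only: choose immediate data vs. an SGL reference. */
    static bool payload_fits_inline(unsigned int fixed_hdr_len,
                                    unsigned int payload_len)
    {
            /* SPACE_LEFT(len) = SGE_MAX_WR_LEN - WR_MIN_LEN - len */
            return payload_len <= SPACE_LEFT(fixed_hdr_len);
    }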
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index b6dd9cbe815f..f5a2624081dc 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c | |||
@@ -154,15 +154,15 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) | |||
154 | struct uld_ctx *u_ctx; | 154 | struct uld_ctx *u_ctx; |
155 | 155 | ||
156 | /* Create the device and add it in the device list */ | 156 | /* Create the device and add it in the device list */ |
157 | if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) | ||
158 | return ERR_PTR(-EOPNOTSUPP); | ||
159 | |||
160 | /* Create the device and add it in the device list */ | ||
157 | u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); | 161 | u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); |
158 | if (!u_ctx) { | 162 | if (!u_ctx) { |
159 | u_ctx = ERR_PTR(-ENOMEM); | 163 | u_ctx = ERR_PTR(-ENOMEM); |
160 | goto out; | 164 | goto out; |
161 | } | 165 | } |
162 | if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) { | ||
163 | u_ctx = ERR_PTR(-ENOMEM); | ||
164 | goto out; | ||
165 | } | ||
166 | u_ctx->lldi = *lld; | 166 | u_ctx->lldi = *lld; |
167 | out: | 167 | out: |
168 | return u_ctx; | 168 | return u_ctx; |
@@ -224,7 +224,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state) | |||
224 | static int __init chcr_crypto_init(void) | 224 | static int __init chcr_crypto_init(void) |
225 | { | 225 | { |
226 | if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) | 226 | if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) |
227 | pr_err("ULD register fail: No chcr crypto support in cxgb4"); | 227 | pr_err("ULD register fail: No chcr crypto support in cxgb4\n"); |
228 | 228 | ||
229 | return 0; | 229 | return 0; |
230 | } | 230 | } |
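Besides returning a truthful error (-EOPNOTSUPP rather than -ENOMEM) when the adapter lacks crypto lookaside support, moving the capability test first also fixes a small leak: the old code kzalloc'd u_ctx and then overwrote the pointer with ERR_PTR() on the unsupported path, abandoning the allocation. The fixed shape is check-then-allocate:

    if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
            return ERR_PTR(-EOPNOTSUPP);    /* nothing allocated yet */

    u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
    if (!u_ctx)
            return ERR_PTR(-ENOMEM);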
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index c9a19b2a1e9f..94e7412f6164 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h | |||
@@ -89,7 +89,7 @@ struct uld_ctx { | |||
89 | struct chcr_dev *dev; | 89 | struct chcr_dev *dev; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | struct uld_ctx * assign_chcr_device(void); | 92 | struct uld_ctx *assign_chcr_device(void); |
93 | int chcr_send_wr(struct sk_buff *skb); | 93 | int chcr_send_wr(struct sk_buff *skb); |
94 | int start_crypto(void); | 94 | int start_crypto(void); |
95 | int stop_crypto(void); | 95 | int stop_crypto(void); |
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 30af1ee17b87..94a87e3ad9bc 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
@@ -149,9 +149,23 @@ | |||
149 | 149 | ||
150 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 | 150 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 |
151 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 | 151 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 |
152 | #define CHCR_SG_SIZE 2048 | 152 | #define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int)) |
153 | #define CHCR_DST_SG_SIZE 2048 | ||
153 | 154 | ||
154 | /* Aligned to 128 bit boundary */ | 155 | static inline struct chcr_context *a_ctx(struct crypto_aead *tfm) |
156 | { | ||
157 | return crypto_aead_ctx(tfm); | ||
158 | } | ||
159 | |||
160 | static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm) | ||
161 | { | ||
162 | return crypto_ablkcipher_ctx(tfm); | ||
163 | } | ||
164 | |||
165 | static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) | ||
166 | { | ||
167 | return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | ||
168 | } | ||
155 | 169 | ||
156 | struct ablk_ctx { | 170 | struct ablk_ctx { |
157 | struct crypto_skcipher *sw_cipher; | 171 | struct crypto_skcipher *sw_cipher; |
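The three inline accessors added above give each transform type a typed route to its chcr_context and replace the open-coded two-step lookups throughout chcr_algo.c:

    /* before */
    struct chcr_context *ctx = crypto_aead_ctx(tfm);
    struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);

    /* after */
    struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));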
@@ -165,16 +179,39 @@ struct ablk_ctx { | |||
165 | }; | 179 | }; |
166 | struct chcr_aead_reqctx { | 180 | struct chcr_aead_reqctx { |
167 | struct sk_buff *skb; | 181 | struct sk_buff *skb; |
168 | struct scatterlist *dst; | 182 | dma_addr_t iv_dma; |
169 | struct scatterlist *newdstsg; | 183 | dma_addr_t b0_dma; |
170 | struct scatterlist srcffwd[2]; | 184 | unsigned int b0_len; |
171 | struct scatterlist dstffwd[2]; | 185 | unsigned int op; |
186 | short int aad_nents; | ||
187 | short int src_nents; | ||
172 | short int dst_nents; | 188 | short int dst_nents; |
189 | u16 imm; | ||
173 | u16 verify; | 190 | u16 verify; |
174 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | 191 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; |
175 | unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; | 192 | unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; |
176 | }; | 193 | }; |
177 | 194 | ||
195 | struct ulptx_walk { | ||
196 | struct ulptx_sgl *sgl; | ||
197 | unsigned int nents; | ||
198 | unsigned int pair_idx; | ||
199 | unsigned int last_sg_len; | ||
200 | struct scatterlist *last_sg; | ||
201 | struct ulptx_sge_pair *pair; | ||
202 | |||
203 | }; | ||
204 | |||
205 | struct dsgl_walk { | ||
206 | unsigned int nents; | ||
207 | unsigned int last_sg_len; | ||
208 | struct scatterlist *last_sg; | ||
209 | struct cpl_rx_phys_dsgl *dsgl; | ||
210 | struct phys_sge_pairs *to; | ||
211 | }; | ||
212 | |||
213 | |||
214 | |||
178 | struct chcr_gcm_ctx { | 215 | struct chcr_gcm_ctx { |
179 | u8 ghash_h[AEAD_H_SIZE]; | 216 | u8 ghash_h[AEAD_H_SIZE]; |
180 | }; | 217 | }; |
@@ -195,7 +232,6 @@ struct __aead_ctx { | |||
195 | struct chcr_aead_ctx { | 232 | struct chcr_aead_ctx { |
196 | __be32 key_ctx_hdr; | 233 | __be32 key_ctx_hdr; |
197 | unsigned int enckey_len; | 234 | unsigned int enckey_len; |
198 | struct crypto_skcipher *null; | ||
199 | struct crypto_aead *sw_cipher; | 235 | struct crypto_aead *sw_cipher; |
200 | u8 salt[MAX_SALT]; | 236 | u8 salt[MAX_SALT]; |
201 | u8 key[CHCR_AES_MAX_KEY_LEN]; | 237 | u8 key[CHCR_AES_MAX_KEY_LEN]; |
@@ -231,8 +267,11 @@ struct chcr_ahash_req_ctx { | |||
231 | u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; | 267 | u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
232 | u8 *reqbfr; | 268 | u8 *reqbfr; |
233 | u8 *skbfr; | 269 | u8 *skbfr; |
270 | dma_addr_t dma_addr; | ||
271 | u32 dma_len; | ||
234 | u8 reqlen; | 272 | u8 reqlen; |
235 | /* DMA the partial hash in it */ | 273 | u8 imm; |
274 | u8 is_sg_map; | ||
236 | u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; | 275 | u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; |
237 | u64 data_len; /* Data len till time */ | 276 | u64 data_len; /* Data len till time */ |
238 | /* SKB which is being sent to the hardware for processing */ | 277 | /* SKB which is being sent to the hardware for processing */ |
@@ -241,14 +280,15 @@ struct chcr_ahash_req_ctx { | |||
241 | 280 | ||
242 | struct chcr_blkcipher_req_ctx { | 281 | struct chcr_blkcipher_req_ctx { |
243 | struct sk_buff *skb; | 282 | struct sk_buff *skb; |
244 | struct scatterlist srcffwd[2]; | ||
245 | struct scatterlist dstffwd[2]; | ||
246 | struct scatterlist *dstsg; | 283 | struct scatterlist *dstsg; |
247 | struct scatterlist *dst; | ||
248 | struct scatterlist *newdstsg; | ||
249 | unsigned int processed; | 284 | unsigned int processed; |
285 | unsigned int last_req_len; | ||
286 | struct scatterlist *srcsg; | ||
287 | unsigned int src_ofst; | ||
288 | unsigned int dst_ofst; | ||
250 | unsigned int op; | 289 | unsigned int op; |
251 | short int dst_nents; | 290 | dma_addr_t iv_dma; |
291 | u16 imm; | ||
252 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | 292 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; |
253 | }; | 293 | }; |
254 | 294 | ||
@@ -262,24 +302,6 @@ struct chcr_alg_template { | |||
262 | } alg; | 302 | } alg; |
263 | }; | 303 | }; |
264 | 304 | ||
265 | struct chcr_req_ctx { | ||
266 | union { | ||
267 | struct ahash_request *ahash_req; | ||
268 | struct aead_request *aead_req; | ||
269 | struct ablkcipher_request *ablk_req; | ||
270 | } req; | ||
271 | union { | ||
272 | struct chcr_ahash_req_ctx *ahash_ctx; | ||
273 | struct chcr_aead_reqctx *reqctx; | ||
274 | struct chcr_blkcipher_req_ctx *ablk_ctx; | ||
275 | } ctx; | ||
276 | }; | ||
277 | |||
278 | struct sge_opaque_hdr { | ||
279 | void *dev; | ||
280 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
281 | }; | ||
282 | |||
283 | typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, | 305 | typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, |
284 | unsigned short qid, | 306 | unsigned short qid, |
285 | int size, | 307 | int size, |
@@ -290,10 +312,39 @@ static int chcr_aead_op(struct aead_request *req_base, | |||
290 | int size, | 312 | int size, |
291 | create_wr_t create_wr_fn); | 313 | create_wr_t create_wr_fn); |
292 | static inline int get_aead_subtype(struct crypto_aead *aead); | 314 | static inline int get_aead_subtype(struct crypto_aead *aead); |
293 | static int is_newsg(struct scatterlist *sgl, unsigned int *newents); | ||
294 | static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, | ||
295 | unsigned int nents); | ||
296 | static inline void free_new_sg(struct scatterlist *sgl); | ||
297 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | 315 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, |
298 | unsigned char *input, int err); | 316 | unsigned char *input, int err); |
317 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); | ||
318 | static int chcr_aead_dma_map(struct device *dev, struct aead_request *req, | ||
319 | unsigned short op_type); | ||
320 | static void chcr_aead_dma_unmap(struct device *dev, struct aead_request | ||
321 | *req, unsigned short op_type); | ||
322 | static inline void chcr_add_aead_dst_ent(struct aead_request *req, | ||
323 | struct cpl_rx_phys_dsgl *phys_cpl, | ||
324 | unsigned int assoclen, | ||
325 | unsigned short op_type, | ||
326 | unsigned short qid); | ||
327 | static inline void chcr_add_aead_src_ent(struct aead_request *req, | ||
328 | struct ulptx_sgl *ulptx, | ||
329 | unsigned int assoclen, | ||
330 | unsigned short op_type); | ||
331 | static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, | ||
332 | struct ulptx_sgl *ulptx, | ||
333 | struct cipher_wr_param *wrparam); | ||
334 | static int chcr_cipher_dma_map(struct device *dev, | ||
335 | struct ablkcipher_request *req); | ||
336 | static void chcr_cipher_dma_unmap(struct device *dev, | ||
337 | struct ablkcipher_request *req); | ||
338 | static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, | ||
339 | struct cpl_rx_phys_dsgl *phys_cpl, | ||
340 | struct cipher_wr_param *wrparam, | ||
341 | unsigned short qid); | ||
342 | int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); | ||
343 | static inline void chcr_add_hash_src_ent(struct ahash_request *req, | ||
344 | struct ulptx_sgl *ulptx, | ||
345 | struct hash_wr_param *param); | ||
346 | static inline int chcr_hash_dma_map(struct device *dev, | ||
347 | struct ahash_request *req); | ||
348 | static inline void chcr_hash_dma_unmap(struct device *dev, | ||
349 | struct ahash_request *req); | ||
299 | #endif /* __CHCR_CRYPTO_H__ */ | 350 | #endif /* __CHCR_CRYPTO_H__ */ |
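The new declarations describe the reworked data path: requests are DMA-mapped (chcr_*_dma_map/unmap) and their scatterlists are translated directly into ULPTX SGLs and receive dsgls via the ulptx_walk/dsgl_walk state above, instead of being copied into the skb. sg_nents_len_skip() supports this by counting the entries needed to cover len bytes after skipping skip bytes. Its body lives in chcr_algo.c and is not part of this hunk; one plausible implementation, for illustration only:

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>

    /* Count sg entries covering @len bytes after skipping @skip bytes. */
    int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip)
    {
            int nents = 0;
            unsigned int entlen;

            for (; sg && len; sg = sg_next(sg)) {
                    entlen = sg->length;
                    if (skip >= entlen) {       /* entry fully skipped */
                            skip -= entlen;
                            continue;
                    }
                    entlen -= skip;             /* partial first entry */
                    skip = 0;
                    nents++;
                    len -= min_t(u64, len, entlen);
            }
            return nents;
    }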
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3980f946874f..74feb6227101 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -308,10 +308,8 @@ unmap_cache: | |||
308 | ctx->base.cache_sz = 0; | 308 | ctx->base.cache_sz = 0; |
309 | } | 309 | } |
310 | free_cache: | 310 | free_cache: |
311 | if (ctx->base.cache) { | 311 | kfree(ctx->base.cache); |
312 | kfree(ctx->base.cache); | 312 | ctx->base.cache = NULL; |
313 | ctx->base.cache = NULL; | ||
314 | } | ||
315 | 313 | ||
316 | unlock: | 314 | unlock: |
317 | spin_unlock_bh(&priv->ring[ring].egress_lock); | 315 | spin_unlock_bh(&priv->ring[ring].egress_lock); |
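The safexcel cleanup leans on kfree() accepting NULL, which makes the removed guard redundant:

    kfree(ctx->base.cache);     /* kfree(NULL) is a no-op */
    ctx->base.cache = NULL;     /* avoid a double free on a later pass */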
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index dadc4a808df5..8705b28eb02c 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -534,7 +534,6 @@ static void release_ixp_crypto(struct device *dev) | |||
534 | NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), | 534 | NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), |
535 | crypt_virt, crypt_phys); | 535 | crypt_virt, crypt_phys); |
536 | } | 536 | } |
537 | return; | ||
538 | } | 537 | } |
539 | 538 | ||
540 | static void reset_sa_dir(struct ix_sa_dir *dir) | 539 | static void reset_sa_dir(struct ix_sa_dir *dir) |
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 6e7a5c77a00a..293832488cc9 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
@@ -34,10 +34,6 @@ | |||
34 | /* Limit of the crypto queue before reaching the backlog */ | 34 | /* Limit of the crypto queue before reaching the backlog */ |
35 | #define CESA_CRYPTO_DEFAULT_MAX_QLEN 128 | 35 | #define CESA_CRYPTO_DEFAULT_MAX_QLEN 128 |
36 | 36 | ||
37 | static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); | ||
38 | module_param_named(allhwsupport, allhwsupport, int, 0444); | ||
39 | MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)"); | ||
40 | |||
41 | struct mv_cesa_dev *cesa_dev; | 37 | struct mv_cesa_dev *cesa_dev; |
42 | 38 | ||
43 | struct crypto_async_request * | 39 | struct crypto_async_request * |
@@ -76,8 +72,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine) | |||
76 | 72 | ||
77 | ctx = crypto_tfm_ctx(req->tfm); | 73 | ctx = crypto_tfm_ctx(req->tfm); |
78 | ctx->ops->step(req); | 74 | ctx->ops->step(req); |
79 | |||
80 | return; | ||
81 | } | 75 | } |
82 | 76 | ||
83 | static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) | 77 | static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) |
@@ -183,8 +177,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req, | |||
183 | spin_lock_bh(&engine->lock); | 177 | spin_lock_bh(&engine->lock); |
184 | ret = crypto_enqueue_request(&engine->queue, req); | 178 | ret = crypto_enqueue_request(&engine->queue, req); |
185 | if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && | 179 | if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && |
186 | (ret == -EINPROGRESS || | 180 | (ret == -EINPROGRESS || ret == -EBUSY)) |
187 | (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
188 | mv_cesa_tdma_chain(engine, creq); | 181 | mv_cesa_tdma_chain(engine, creq); |
189 | spin_unlock_bh(&engine->lock); | 182 | spin_unlock_bh(&engine->lock); |
190 | 183 | ||
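With the queue-full case now reported as -ENOSPC, the return value of crypto_enqueue_request() is unambiguous, which is why the extra MAY_BACKLOG test above can go:

    /* crypto_enqueue_request() after the rework in this cycle:
     *   -EINPROGRESS  queued for processing
     *   -EBUSY        placed on the backlog (MAY_BACKLOG was set)
     *   -ENOSPC       queue full and backlog not permitted
     * so -EBUSY on its own now implies the request was accepted. */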
@@ -202,7 +195,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa) | |||
202 | int i, j; | 195 | int i, j; |
203 | 196 | ||
204 | for (i = 0; i < cesa->caps->ncipher_algs; i++) { | 197 | for (i = 0; i < cesa->caps->ncipher_algs; i++) { |
205 | ret = crypto_register_alg(cesa->caps->cipher_algs[i]); | 198 | ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]); |
206 | if (ret) | 199 | if (ret) |
207 | goto err_unregister_crypto; | 200 | goto err_unregister_crypto; |
208 | } | 201 | } |
@@ -222,7 +215,7 @@ err_unregister_ahash: | |||
222 | 215 | ||
223 | err_unregister_crypto: | 216 | err_unregister_crypto: |
224 | for (j = 0; j < i; j++) | 217 | for (j = 0; j < i; j++) |
225 | crypto_unregister_alg(cesa->caps->cipher_algs[j]); | 218 | crypto_unregister_skcipher(cesa->caps->cipher_algs[j]); |
226 | 219 | ||
227 | return ret; | 220 | return ret; |
228 | } | 221 | } |
@@ -235,10 +228,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) | |||
235 | crypto_unregister_ahash(cesa->caps->ahash_algs[i]); | 228 | crypto_unregister_ahash(cesa->caps->ahash_algs[i]); |
236 | 229 | ||
237 | for (i = 0; i < cesa->caps->ncipher_algs; i++) | 230 | for (i = 0; i < cesa->caps->ncipher_algs; i++) |
238 | crypto_unregister_alg(cesa->caps->cipher_algs[i]); | 231 | crypto_unregister_skcipher(cesa->caps->cipher_algs[i]); |
239 | } | 232 | } |
240 | 233 | ||
241 | static struct crypto_alg *orion_cipher_algs[] = { | 234 | static struct skcipher_alg *orion_cipher_algs[] = { |
242 | &mv_cesa_ecb_des_alg, | 235 | &mv_cesa_ecb_des_alg, |
243 | &mv_cesa_cbc_des_alg, | 236 | &mv_cesa_cbc_des_alg, |
244 | &mv_cesa_ecb_des3_ede_alg, | 237 | &mv_cesa_ecb_des3_ede_alg, |
@@ -254,7 +247,7 @@ static struct ahash_alg *orion_ahash_algs[] = { | |||
254 | &mv_ahmac_sha1_alg, | 247 | &mv_ahmac_sha1_alg, |
255 | }; | 248 | }; |
256 | 249 | ||
257 | static struct crypto_alg *armada_370_cipher_algs[] = { | 250 | static struct skcipher_alg *armada_370_cipher_algs[] = { |
258 | &mv_cesa_ecb_des_alg, | 251 | &mv_cesa_ecb_des_alg, |
259 | &mv_cesa_cbc_des_alg, | 252 | &mv_cesa_cbc_des_alg, |
260 | &mv_cesa_ecb_des3_ede_alg, | 253 | &mv_cesa_ecb_des3_ede_alg, |
@@ -459,9 +452,6 @@ static int mv_cesa_probe(struct platform_device *pdev) | |||
459 | caps = match->data; | 452 | caps = match->data; |
460 | } | 453 | } |
461 | 454 | ||
462 | if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport) | ||
463 | return -ENOTSUPP; | ||
464 | |||
465 | cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); | 455 | cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); |
466 | if (!cesa) | 456 | if (!cesa) |
467 | return -ENOMEM; | 457 | return -ENOMEM; |
@@ -599,9 +589,16 @@ static int mv_cesa_remove(struct platform_device *pdev) | |||
599 | return 0; | 589 | return 0; |
600 | } | 590 | } |
601 | 591 | ||
592 | static const struct platform_device_id mv_cesa_plat_id_table[] = { | ||
593 | { .name = "mv_crypto" }, | ||
594 | { /* sentinel */ }, | ||
595 | }; | ||
596 | MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table); | ||
597 | |||
602 | static struct platform_driver marvell_cesa = { | 598 | static struct platform_driver marvell_cesa = { |
603 | .probe = mv_cesa_probe, | 599 | .probe = mv_cesa_probe, |
604 | .remove = mv_cesa_remove, | 600 | .remove = mv_cesa_remove, |
601 | .id_table = mv_cesa_plat_id_table, | ||
605 | .driver = { | 602 | .driver = { |
606 | .name = "marvell-cesa", | 603 | .name = "marvell-cesa", |
607 | .of_match_table = mv_cesa_of_match_table, | 604 | .of_match_table = mv_cesa_of_match_table, |
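The new id table lets this driver bind to boards that still instantiate the engine as a legacy "mv_crypto" platform device, and MODULE_DEVICE_TABLE() exports it for module autoloading. Board code along these lines would now probe marvell-cesa (illustrative sketch):

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int __init board_add_crypto_dev(void)
    {
            struct platform_device *pdev;

            /* matches { .name = "mv_crypto" } in mv_cesa_plat_id_table */
            pdev = platform_device_register_simple("mv_crypto", -1, NULL, 0);
            return PTR_ERR_OR_ZERO(pdev);
    }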
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index 0032e3bf46ee..d63a6ee905c9 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <crypto/algapi.h> | 5 | #include <crypto/algapi.h> |
6 | #include <crypto/hash.h> | 6 | #include <crypto/hash.h> |
7 | #include <crypto/internal/hash.h> | 7 | #include <crypto/internal/hash.h> |
8 | #include <crypto/internal/skcipher.h> | ||
8 | 9 | ||
9 | #include <linux/crypto.h> | 10 | #include <linux/crypto.h> |
10 | #include <linux/dmapool.h> | 11 | #include <linux/dmapool.h> |
@@ -373,7 +374,7 @@ struct mv_cesa_engine; | |||
373 | struct mv_cesa_caps { | 374 | struct mv_cesa_caps { |
374 | int nengines; | 375 | int nengines; |
375 | bool has_tdma; | 376 | bool has_tdma; |
376 | struct crypto_alg **cipher_algs; | 377 | struct skcipher_alg **cipher_algs; |
377 | int ncipher_algs; | 378 | int ncipher_algs; |
378 | struct ahash_alg **ahash_algs; | 379 | struct ahash_alg **ahash_algs; |
379 | int nahash_algs; | 380 | int nahash_algs; |
@@ -539,12 +540,12 @@ struct mv_cesa_sg_std_iter { | |||
539 | }; | 540 | }; |
540 | 541 | ||
541 | /** | 542 | /** |
542 | * struct mv_cesa_ablkcipher_std_req - cipher standard request | 543 | * struct mv_cesa_skcipher_std_req - cipher standard request |
543 | * @op: operation context | 544 | * @op: operation context |
544 | * @offset: current operation offset | 545 | * @offset: current operation offset |
545 | * @size: size of the crypto operation | 546 | * @size: size of the crypto operation |
546 | */ | 547 | */ |
547 | struct mv_cesa_ablkcipher_std_req { | 548 | struct mv_cesa_skcipher_std_req { |
548 | struct mv_cesa_op_ctx op; | 549 | struct mv_cesa_op_ctx op; |
549 | unsigned int offset; | 550 | unsigned int offset; |
550 | unsigned int size; | 551 | unsigned int size; |
@@ -552,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req { | |||
552 | }; | 553 | }; |
553 | 554 | ||
554 | /** | 555 | /** |
555 | * struct mv_cesa_ablkcipher_req - cipher request | 556 | * struct mv_cesa_skcipher_req - cipher request |
556 | * @req: type specific request information | 557 | * @req: type specific request information |
557 | * @src_nents: number of entries in the src sg list | 558 | * @src_nents: number of entries in the src sg list |
558 | * @dst_nents: number of entries in the dest sg list | 559 | * @dst_nents: number of entries in the dest sg list |
559 | */ | 560 | */ |
560 | struct mv_cesa_ablkcipher_req { | 561 | struct mv_cesa_skcipher_req { |
561 | struct mv_cesa_req base; | 562 | struct mv_cesa_req base; |
562 | struct mv_cesa_ablkcipher_std_req std; | 563 | struct mv_cesa_skcipher_std_req std; |
563 | int src_nents; | 564 | int src_nents; |
564 | int dst_nents; | 565 | int dst_nents; |
565 | }; | 566 | }; |
@@ -764,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, | |||
764 | * the backlog and will be processed later. There's no need to | 765 | * the backlog and will be processed later. There's no need to |
765 | * clean it up. | 766 | * clean it up. |
766 | */ | 767 | */ |
767 | if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) | 768 | if (ret == -EBUSY) |
768 | return false; | 769 | return false; |
769 | 770 | ||
770 | /* Request wasn't queued, we need to clean it up */ | 771 | /* Request wasn't queued, we need to clean it up */ |
@@ -869,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg; | |||
869 | extern struct ahash_alg mv_ahmac_sha1_alg; | 870 | extern struct ahash_alg mv_ahmac_sha1_alg; |
870 | extern struct ahash_alg mv_ahmac_sha256_alg; | 871 | extern struct ahash_alg mv_ahmac_sha256_alg; |
871 | 872 | ||
872 | extern struct crypto_alg mv_cesa_ecb_des_alg; | 873 | extern struct skcipher_alg mv_cesa_ecb_des_alg; |
873 | extern struct crypto_alg mv_cesa_cbc_des_alg; | 874 | extern struct skcipher_alg mv_cesa_cbc_des_alg; |
874 | extern struct crypto_alg mv_cesa_ecb_des3_ede_alg; | 875 | extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg; |
875 | extern struct crypto_alg mv_cesa_cbc_des3_ede_alg; | 876 | extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg; |
876 | extern struct crypto_alg mv_cesa_ecb_aes_alg; | 877 | extern struct skcipher_alg mv_cesa_ecb_aes_alg; |
877 | extern struct crypto_alg mv_cesa_cbc_aes_alg; | 878 | extern struct skcipher_alg mv_cesa_cbc_aes_alg; |
878 | 879 | ||
879 | #endif /* __MARVELL_CESA_H__ */ | 880 | #endif /* __MARVELL_CESA_H__ */ |
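These header changes complete the ablkcipher-to-skcipher conversion: the per-mode definitions become struct skcipher_alg and are registered with crypto_register_skcipher(). For reference, the general shape of such a definition (handler names and values here are illustrative, not the driver's):

    #include <crypto/aes.h>
    #include <crypto/internal/skcipher.h>

    static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen);
    static int example_encrypt(struct skcipher_request *req);
    static int example_decrypt(struct skcipher_request *req);

    static struct skcipher_alg example_cbc_aes_alg = {
            .setkey         = example_setkey,
            .encrypt        = example_encrypt,
            .decrypt        = example_decrypt,
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize         = AES_BLOCK_SIZE,
            .base = {
                    .cra_name        = "cbc(aes)",
                    .cra_driver_name = "example-cbc-aes",
                    .cra_priority    = 300,
                    .cra_flags       = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                       CRYPTO_ALG_ASYNC,
                    .cra_blocksize   = AES_BLOCK_SIZE,
                    .cra_ctxsize     = sizeof(struct crypto_aes_ctx),
                    .cra_module      = THIS_MODULE,
            },
    };
    /* registered via crypto_register_skcipher(&example_cbc_aes_alg) */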
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 098871a22a54..0ae84ec9e21c 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c | |||
@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx { | |||
32 | struct crypto_aes_ctx aes; | 32 | struct crypto_aes_ctx aes; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct mv_cesa_ablkcipher_dma_iter { | 35 | struct mv_cesa_skcipher_dma_iter { |
36 | struct mv_cesa_dma_iter base; | 36 | struct mv_cesa_dma_iter base; |
37 | struct mv_cesa_sg_dma_iter src; | 37 | struct mv_cesa_sg_dma_iter src; |
38 | struct mv_cesa_sg_dma_iter dst; | 38 | struct mv_cesa_sg_dma_iter dst; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static inline void | 41 | static inline void |
42 | mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, | 42 | mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter, |
43 | struct ablkcipher_request *req) | 43 | struct skcipher_request *req) |
44 | { | 44 | { |
45 | mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); | 45 | mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen); |
46 | mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); | 46 | mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); |
47 | mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); | 47 | mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline bool | 50 | static inline bool |
51 | mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) | 51 | mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter) |
52 | { | 52 | { |
53 | iter->src.op_offset = 0; | 53 | iter->src.op_offset = 0; |
54 | iter->dst.op_offset = 0; | 54 | iter->dst.op_offset = 0; |
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | static inline void | 59 | static inline void |
60 | mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) | 60 | mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req) |
61 | { | 61 | { |
62 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 62 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
63 | 63 | ||
64 | if (req->dst != req->src) { | 64 | if (req->dst != req->src) { |
65 | dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, | 65 | dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, |
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) | |||
73 | mv_cesa_dma_cleanup(&creq->base); | 73 | mv_cesa_dma_cleanup(&creq->base); |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) | 76 | static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req) |
77 | { | 77 | { |
78 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 78 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
79 | 79 | ||
80 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) | 80 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) |
81 | mv_cesa_ablkcipher_dma_cleanup(req); | 81 | mv_cesa_skcipher_dma_cleanup(req); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) | 84 | static void mv_cesa_skcipher_std_step(struct skcipher_request *req) |
85 | { | 85 | { |
86 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 86 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
87 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; | 87 | struct mv_cesa_skcipher_std_req *sreq = &creq->std; |
88 | struct mv_cesa_engine *engine = creq->base.engine; | 88 | struct mv_cesa_engine *engine = creq->base.engine; |
89 | size_t len = min_t(size_t, req->nbytes - sreq->offset, | 89 | size_t len = min_t(size_t, req->cryptlen - sreq->offset, |
90 | CESA_SA_SRAM_PAYLOAD_SIZE); | 90 | CESA_SA_SRAM_PAYLOAD_SIZE); |
91 | 91 | ||
92 | mv_cesa_adjust_op(engine, &sreq->op); | 92 | mv_cesa_adjust_op(engine, &sreq->op); |
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) | |||
114 | writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); | 114 | writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); |
115 | } | 115 | } |
116 | 116 | ||
117 | static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, | 117 | static int mv_cesa_skcipher_std_process(struct skcipher_request *req, |
118 | u32 status) | 118 | u32 status) |
119 | { | 119 | { |
120 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 120 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
121 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; | 121 | struct mv_cesa_skcipher_std_req *sreq = &creq->std; |
122 | struct mv_cesa_engine *engine = creq->base.engine; | 122 | struct mv_cesa_engine *engine = creq->base.engine; |
123 | size_t len; | 123 | size_t len; |
124 | 124 | ||
@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, | |||
127 | sreq->size, sreq->offset); | 127 | sreq->size, sreq->offset); |
128 | 128 | ||
129 | sreq->offset += len; | 129 | sreq->offset += len; |
130 | if (sreq->offset < req->nbytes) | 130 | if (sreq->offset < req->cryptlen) |
131 | return -EINPROGRESS; | 131 | return -EINPROGRESS; |
132 | 132 | ||
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
135 | 135 | ||
136 | static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, | 136 | static int mv_cesa_skcipher_process(struct crypto_async_request *req, |
137 | u32 status) | 137 | u32 status) |
138 | { | 138 | { |
139 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 139 | struct skcipher_request *skreq = skcipher_request_cast(req); |
140 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | 140 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); |
141 | struct mv_cesa_req *basereq = &creq->base; | 141 | struct mv_cesa_req *basereq = &creq->base; |
142 | 142 | ||
143 | if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ) | 143 | if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ) |
144 | return mv_cesa_ablkcipher_std_process(ablkreq, status); | 144 | return mv_cesa_skcipher_std_process(skreq, status); |
145 | 145 | ||
146 | return mv_cesa_dma_process(basereq, status); | 146 | return mv_cesa_dma_process(basereq, status); |
147 | } | 147 | } |
148 | 148 | ||
149 | static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) | 149 | static void mv_cesa_skcipher_step(struct crypto_async_request *req) |
150 | { | 150 | { |
151 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 151 | struct skcipher_request *skreq = skcipher_request_cast(req); |
152 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | 152 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); |
153 | 153 | ||
154 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) | 154 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) |
155 | mv_cesa_dma_step(&creq->base); | 155 | mv_cesa_dma_step(&creq->base); |
156 | else | 156 | else |
157 | mv_cesa_ablkcipher_std_step(ablkreq); | 157 | mv_cesa_skcipher_std_step(skreq); |
158 | } | 158 | } |
159 | 159 | ||
160 | static inline void | 160 | static inline void |
161 | mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) | 161 | mv_cesa_skcipher_dma_prepare(struct skcipher_request *req) |
162 | { | 162 | { |
163 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 163 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
164 | struct mv_cesa_req *basereq = &creq->base; | 164 | struct mv_cesa_req *basereq = &creq->base; |
165 | 165 | ||
166 | mv_cesa_dma_prepare(basereq, basereq->engine); | 166 | mv_cesa_dma_prepare(basereq, basereq->engine); |
167 | } | 167 | } |
168 | 168 | ||
169 | static inline void | 169 | static inline void |
170 | mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) | 170 | mv_cesa_skcipher_std_prepare(struct skcipher_request *req) |
171 | { | 171 | { |
172 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 172 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
173 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; | 173 | struct mv_cesa_skcipher_std_req *sreq = &creq->std; |
174 | 174 | ||
175 | sreq->size = 0; | 175 | sreq->size = 0; |
176 | sreq->offset = 0; | 176 | sreq->offset = 0; |
177 | } | 177 | } |
178 | 178 | ||
179 | static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, | 179 | static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req, |
180 | struct mv_cesa_engine *engine) | 180 | struct mv_cesa_engine *engine) |
181 | { | 181 | { |
182 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 182 | struct skcipher_request *skreq = skcipher_request_cast(req); |
183 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | 183 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); |
184 | creq->base.engine = engine; | 184 | creq->base.engine = engine; |
185 | 185 | ||
186 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) | 186 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) |
187 | mv_cesa_ablkcipher_dma_prepare(ablkreq); | 187 | mv_cesa_skcipher_dma_prepare(skreq); |
188 | else | 188 | else |
189 | mv_cesa_ablkcipher_std_prepare(ablkreq); | 189 | mv_cesa_skcipher_std_prepare(skreq); |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline void | 192 | static inline void |
193 | mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) | 193 | mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req) |
194 | { | 194 | { |
195 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 195 | struct skcipher_request *skreq = skcipher_request_cast(req); |
196 | 196 | ||
197 | mv_cesa_ablkcipher_cleanup(ablkreq); | 197 | mv_cesa_skcipher_cleanup(skreq); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void | 200 | static void |
201 | mv_cesa_ablkcipher_complete(struct crypto_async_request *req) | 201 | mv_cesa_skcipher_complete(struct crypto_async_request *req) |
202 | { | 202 | { |
203 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 203 | struct skcipher_request *skreq = skcipher_request_cast(req); |
204 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | 204 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); |
205 | struct mv_cesa_engine *engine = creq->base.engine; | 205 | struct mv_cesa_engine *engine = creq->base.engine; |
206 | unsigned int ivsize; | 206 | unsigned int ivsize; |
207 | 207 | ||
208 | atomic_sub(ablkreq->nbytes, &engine->load); | 208 | atomic_sub(skreq->cryptlen, &engine->load); |
209 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)); | 209 | ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq)); |
210 | 210 | ||
211 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) { | 211 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) { |
212 | struct mv_cesa_req *basereq; | 212 | struct mv_cesa_req *basereq; |
213 | 213 | ||
214 | basereq = &creq->base; | 214 | basereq = &creq->base; |
215 | memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv, | 215 | memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv, |
216 | ivsize); | 216 | ivsize); |
217 | } else { | 217 | } else { |
218 | memcpy_fromio(ablkreq->info, | 218 | memcpy_fromio(skreq->iv, |
219 | engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, | 219 | engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, |
220 | ivsize); | 220 | ivsize); |
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { | 224 | static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = { |
225 | .step = mv_cesa_ablkcipher_step, | 225 | .step = mv_cesa_skcipher_step, |
226 | .process = mv_cesa_ablkcipher_process, | 226 | .process = mv_cesa_skcipher_process, |
227 | .cleanup = mv_cesa_ablkcipher_req_cleanup, | 227 | .cleanup = mv_cesa_skcipher_req_cleanup, |
228 | .complete = mv_cesa_ablkcipher_complete, | 228 | .complete = mv_cesa_skcipher_complete, |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) | 231 | static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm) |
232 | { | 232 | { |
233 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 233 | void *ctx = crypto_tfm_ctx(tfm); |
234 | |||
235 | memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize); | ||
236 | } | ||
237 | |||
238 | static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm) | ||
239 | { | ||
240 | struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm); | ||
234 | 241 | ||
235 | ctx->base.ops = &mv_cesa_ablkcipher_req_ops; | 242 | ctx->ops = &mv_cesa_skcipher_req_ops; |
236 | 243 | ||
237 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req); | 244 | crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), |
245 | sizeof(struct mv_cesa_skcipher_req)); | ||
238 | 246 | ||
239 | return 0; | 247 | return 0; |
240 | } | 248 | } |
241 | 249 | ||
242 | static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 250 | static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, |
243 | unsigned int len) | 251 | unsigned int len) |
244 | { | 252 | { |
245 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 253 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); |
246 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 254 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
247 | int remaining; | 255 | int remaining; |
248 | int offset; | 256 | int offset; |
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
251 | 259 | ||
252 | ret = crypto_aes_expand_key(&ctx->aes, key, len); | 260 | ret = crypto_aes_expand_key(&ctx->aes, key, len); |
253 | if (ret) { | 261 | if (ret) { |
254 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 262 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
255 | return ret; | 263 | return ret; |
256 | } | 264 | } |
257 | 265 | ||
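Also worth noting in this conversion: the new mv_cesa_skcipher_cra_exit() (added earlier in this hunk) wipes the whole transform context on teardown so expanded keys do not linger in freed memory, and it uses memzero_explicit() because a plain memset() on memory about to be freed may be optimized away:

    #include <linux/crypto.h>
    #include <linux/string.h>

    static void example_cra_exit(struct crypto_tfm *tfm)
    {
            void *ctx = crypto_tfm_ctx(tfm);

            /* memzero_explicit() is never elided by the compiler */
            memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
    }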
@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
264 | return 0; | 272 | return 0; |
265 | } | 273 | } |
266 | 274 | ||
267 | static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 275 | static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, |
268 | unsigned int len) | 276 | unsigned int len) |
269 | { | 277 | { |
270 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 278 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); |
271 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); | 279 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); |
272 | u32 tmp[DES_EXPKEY_WORDS]; | 280 | u32 tmp[DES_EXPKEY_WORDS]; |
273 | int ret; | 281 | int ret; |
274 | 282 | ||
275 | if (len != DES_KEY_SIZE) { | 283 | if (len != DES_KEY_SIZE) { |
276 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 284 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
277 | return -EINVAL; | 285 | return -EINVAL; |
278 | } | 286 | } |
279 | 287 | ||
@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
288 | return 0; | 296 | return 0; |
289 | } | 297 | } |
290 | 298 | ||
291 | static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, | 299 | static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, |
292 | const u8 *key, unsigned int len) | 300 | const u8 *key, unsigned int len) |
293 | { | 301 | { |
294 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 302 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); |
295 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); | 303 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); |
296 | 304 | ||
297 | if (len != DES3_EDE_KEY_SIZE) { | 305 | if (len != DES3_EDE_KEY_SIZE) { |
298 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 306 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
299 | return -EINVAL; | 307 | return -EINVAL; |
300 | } | 308 | } |
301 | 309 | ||
@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, | |||
304 | return 0; | 312 | return 0; |
305 | } | 313 | } |
306 | 314 | ||
307 | static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, | 315 | static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, |
308 | const struct mv_cesa_op_ctx *op_templ) | 316 | const struct mv_cesa_op_ctx *op_templ) |
309 | { | 317 | { |
310 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 318 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
311 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 319 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
312 | GFP_KERNEL : GFP_ATOMIC; | 320 | GFP_KERNEL : GFP_ATOMIC; |
313 | struct mv_cesa_req *basereq = &creq->base; | 321 | struct mv_cesa_req *basereq = &creq->base; |
314 | struct mv_cesa_ablkcipher_dma_iter iter; | 322 | struct mv_cesa_skcipher_dma_iter iter; |
315 | bool skip_ctx = false; | 323 | bool skip_ctx = false; |
316 | int ret; | 324 | int ret; |
317 | unsigned int ivsize; | 325 | unsigned int ivsize; |
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, | |||
339 | } | 347 | } |
340 | 348 | ||
341 | mv_cesa_tdma_desc_iter_init(&basereq->chain); | 349 | mv_cesa_tdma_desc_iter_init(&basereq->chain); |
342 | mv_cesa_ablkcipher_req_iter_init(&iter, req); | 350 | mv_cesa_skcipher_req_iter_init(&iter, req); |
343 | 351 | ||
344 | do { | 352 | do { |
345 | struct mv_cesa_op_ctx *op; | 353 | struct mv_cesa_op_ctx *op; |
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, | |||
370 | if (ret) | 378 | if (ret) |
371 | goto err_free_tdma; | 379 | goto err_free_tdma; |
372 | 380 | ||
373 | } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); | 381 | } while (mv_cesa_skcipher_req_iter_next_op(&iter)); |
374 | 382 | ||
375 | /* Add output data for IV */ | 383 | /* Add output data for IV */ |
376 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); | 384 | ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req)); |
377 | ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, | 385 | ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, |
378 | CESA_SA_DATA_SRAM_OFFSET, | 386 | CESA_SA_DATA_SRAM_OFFSET, |
379 | CESA_TDMA_SRC_IN_SRAM, flags); | 387 | CESA_TDMA_SRC_IN_SRAM, flags); |
@@ -399,11 +407,11 @@ err_unmap_src: | |||
399 | } | 407 | } |
400 | 408 | ||
401 | static inline int | 409 | static inline int |
402 | mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, | 410 | mv_cesa_skcipher_std_req_init(struct skcipher_request *req, |
403 | const struct mv_cesa_op_ctx *op_templ) | 411 | const struct mv_cesa_op_ctx *op_templ) |
404 | { | 412 | { |
405 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 413 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
406 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; | 414 | struct mv_cesa_skcipher_std_req *sreq = &creq->std; |
407 | struct mv_cesa_req *basereq = &creq->base; | 415 | struct mv_cesa_req *basereq = &creq->base; |
408 | 416 | ||
409 | sreq->op = *op_templ; | 417 | sreq->op = *op_templ; |
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, | |||
414 | return 0; | 422 | return 0; |
415 | } | 423 | } |
416 | 424 | ||
417 | static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, | 425 | static int mv_cesa_skcipher_req_init(struct skcipher_request *req, |
418 | struct mv_cesa_op_ctx *tmpl) | 426 | struct mv_cesa_op_ctx *tmpl) |
419 | { | 427 | { |
420 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 428 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
421 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 429 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
422 | unsigned int blksize = crypto_ablkcipher_blocksize(tfm); | 430 | unsigned int blksize = crypto_skcipher_blocksize(tfm); |
423 | int ret; | 431 | int ret; |
424 | 432 | ||
425 | if (!IS_ALIGNED(req->nbytes, blksize)) | 433 | if (!IS_ALIGNED(req->cryptlen, blksize)) |
426 | return -EINVAL; | 434 | return -EINVAL; |
427 | 435 | ||
428 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | 436 | creq->src_nents = sg_nents_for_len(req->src, req->cryptlen); |
429 | if (creq->src_nents < 0) { | 437 | if (creq->src_nents < 0) { |
430 | dev_err(cesa_dev->dev, "Invalid number of src SG"); | 438 | dev_err(cesa_dev->dev, "Invalid number of src SG"); |
431 | return creq->src_nents; | 439 | return creq->src_nents; |
432 | } | 440 | } |
433 | creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); | 441 | creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); |
434 | if (creq->dst_nents < 0) { | 442 | if (creq->dst_nents < 0) { |
435 | dev_err(cesa_dev->dev, "Invalid number of dst SG"); | 443 | dev_err(cesa_dev->dev, "Invalid number of dst SG"); |
436 | return creq->dst_nents; | 444 | return creq->dst_nents; |
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, | |||
440 | CESA_SA_DESC_CFG_OP_MSK); | 448 | CESA_SA_DESC_CFG_OP_MSK); |
441 | 449 | ||
442 | if (cesa_dev->caps->has_tdma) | 450 | if (cesa_dev->caps->has_tdma) |
443 | ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); | 451 | ret = mv_cesa_skcipher_dma_req_init(req, tmpl); |
444 | else | 452 | else |
445 | ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); | 453 | ret = mv_cesa_skcipher_std_req_init(req, tmpl); |
446 | 454 | ||
447 | return ret; | 455 | return ret; |
448 | } | 456 | } |
449 | 457 | ||
450 | static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req, | 458 | static int mv_cesa_skcipher_queue_req(struct skcipher_request *req, |
451 | struct mv_cesa_op_ctx *tmpl) | 459 | struct mv_cesa_op_ctx *tmpl) |
452 | { | 460 | { |
453 | int ret; | 461 | int ret; |
454 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | 462 | struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); |
455 | struct mv_cesa_engine *engine; | 463 | struct mv_cesa_engine *engine; |
456 | 464 | ||
457 | ret = mv_cesa_ablkcipher_req_init(req, tmpl); | 465 | ret = mv_cesa_skcipher_req_init(req, tmpl); |
458 | if (ret) | 466 | if (ret) |
459 | return ret; | 467 | return ret; |
460 | 468 | ||
461 | engine = mv_cesa_select_engine(req->nbytes); | 469 | engine = mv_cesa_select_engine(req->cryptlen); |
462 | mv_cesa_ablkcipher_prepare(&req->base, engine); | 470 | mv_cesa_skcipher_prepare(&req->base, engine); |
463 | 471 | ||
464 | ret = mv_cesa_queue_req(&req->base, &creq->base); | 472 | ret = mv_cesa_queue_req(&req->base, &creq->base); |
465 | 473 | ||
466 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) | 474 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
467 | mv_cesa_ablkcipher_cleanup(req); | 475 | mv_cesa_skcipher_cleanup(req); |
468 | 476 | ||
469 | return ret; | 477 | return ret; |
470 | } | 478 | } |
471 | 479 | ||
472 | static int mv_cesa_des_op(struct ablkcipher_request *req, | 480 | static int mv_cesa_des_op(struct skcipher_request *req, |
473 | struct mv_cesa_op_ctx *tmpl) | 481 | struct mv_cesa_op_ctx *tmpl) |
474 | { | 482 | { |
475 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 483 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req, | |||
479 | 487 | ||
480 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); | 488 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); |
481 | 489 | ||
482 | return mv_cesa_ablkcipher_queue_req(req, tmpl); | 490 | return mv_cesa_skcipher_queue_req(req, tmpl); |
483 | } | 491 | } |
484 | 492 | ||
485 | static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) | 493 | static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req) |
486 | { | 494 | { |
487 | struct mv_cesa_op_ctx tmpl; | 495 | struct mv_cesa_op_ctx tmpl; |
488 | 496 | ||
@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) | |||
493 | return mv_cesa_des_op(req, &tmpl); | 501 | return mv_cesa_des_op(req, &tmpl); |
494 | } | 502 | } |
495 | 503 | ||
496 | static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) | 504 | static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req) |
497 | { | 505 | { |
498 | struct mv_cesa_op_ctx tmpl; | 506 | struct mv_cesa_op_ctx tmpl; |
499 | 507 | ||
@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) | |||
504 | return mv_cesa_des_op(req, &tmpl); | 512 | return mv_cesa_des_op(req, &tmpl); |
505 | } | 513 | } |
506 | 514 | ||
507 | struct crypto_alg mv_cesa_ecb_des_alg = { | 515 | struct skcipher_alg mv_cesa_ecb_des_alg = { |
508 | .cra_name = "ecb(des)", | 516 | .setkey = mv_cesa_des_setkey, |
509 | .cra_driver_name = "mv-ecb-des", | 517 | .encrypt = mv_cesa_ecb_des_encrypt, |
510 | .cra_priority = 300, | 518 | .decrypt = mv_cesa_ecb_des_decrypt, |
511 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 519 | .min_keysize = DES_KEY_SIZE, |
512 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 520 | .max_keysize = DES_KEY_SIZE, |
513 | .cra_blocksize = DES_BLOCK_SIZE, | 521 | .base = { |
514 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), | 522 | .cra_name = "ecb(des)", |
515 | .cra_alignmask = 0, | 523 | .cra_driver_name = "mv-ecb-des", |
516 | .cra_type = &crypto_ablkcipher_type, | 524 | .cra_priority = 300, |
517 | .cra_module = THIS_MODULE, | 525 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
518 | .cra_init = mv_cesa_ablkcipher_cra_init, | 526 | .cra_blocksize = DES_BLOCK_SIZE, |
519 | .cra_u = { | 527 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), |
520 | .ablkcipher = { | 528 | .cra_alignmask = 0, |
521 | .min_keysize = DES_KEY_SIZE, | 529 | .cra_module = THIS_MODULE, |
522 | .max_keysize = DES_KEY_SIZE, | 530 | .cra_init = mv_cesa_skcipher_cra_init, |
523 | .setkey = mv_cesa_des_setkey, | 531 | .cra_exit = mv_cesa_skcipher_cra_exit, |
524 | .encrypt = mv_cesa_ecb_des_encrypt, | ||
525 | .decrypt = mv_cesa_ecb_des_decrypt, | ||
526 | }, | ||
527 | }, | 532 | }, |
528 | }; | 533 | }; |
529 | 534 | ||
530 | static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, | 535 | static int mv_cesa_cbc_des_op(struct skcipher_request *req, |
531 | struct mv_cesa_op_ctx *tmpl) | 536 | struct mv_cesa_op_ctx *tmpl) |
532 | { | 537 | { |
533 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, | 538 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, |
534 | CESA_SA_DESC_CFG_CRYPTCM_MSK); | 539 | CESA_SA_DESC_CFG_CRYPTCM_MSK); |
535 | 540 | ||
536 | memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); | 541 | memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE); |
537 | 542 | ||
538 | return mv_cesa_des_op(req, tmpl); | 543 | return mv_cesa_des_op(req, tmpl); |
539 | } | 544 | } |
540 | 545 | ||
541 | static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) | 546 | static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req) |
542 | { | 547 | { |
543 | struct mv_cesa_op_ctx tmpl; | 548 | struct mv_cesa_op_ctx tmpl; |
544 | 549 | ||
@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) | |||
547 | return mv_cesa_cbc_des_op(req, &tmpl); | 552 | return mv_cesa_cbc_des_op(req, &tmpl); |
548 | } | 553 | } |
549 | 554 | ||
550 | static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) | 555 | static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req) |
551 | { | 556 | { |
552 | struct mv_cesa_op_ctx tmpl; | 557 | struct mv_cesa_op_ctx tmpl; |
553 | 558 | ||
@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) | |||
556 | return mv_cesa_cbc_des_op(req, &tmpl); | 561 | return mv_cesa_cbc_des_op(req, &tmpl); |
557 | } | 562 | } |
558 | 563 | ||
559 | struct crypto_alg mv_cesa_cbc_des_alg = { | 564 | struct skcipher_alg mv_cesa_cbc_des_alg = { |
560 | .cra_name = "cbc(des)", | 565 | .setkey = mv_cesa_des_setkey, |
561 | .cra_driver_name = "mv-cbc-des", | 566 | .encrypt = mv_cesa_cbc_des_encrypt, |
562 | .cra_priority = 300, | 567 | .decrypt = mv_cesa_cbc_des_decrypt, |
563 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 568 | .min_keysize = DES_KEY_SIZE, |
564 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 569 | .max_keysize = DES_KEY_SIZE, |
565 | .cra_blocksize = DES_BLOCK_SIZE, | 570 | .ivsize = DES_BLOCK_SIZE, |
566 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), | 571 | .base = { |
567 | .cra_alignmask = 0, | 572 | .cra_name = "cbc(des)", |
568 | .cra_type = &crypto_ablkcipher_type, | 573 | .cra_driver_name = "mv-cbc-des", |
569 | .cra_module = THIS_MODULE, | 574 | .cra_priority = 300, |
570 | .cra_init = mv_cesa_ablkcipher_cra_init, | 575 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
571 | .cra_u = { | 576 | .cra_blocksize = DES_BLOCK_SIZE, |
572 | .ablkcipher = { | 577 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), |
573 | .min_keysize = DES_KEY_SIZE, | 578 | .cra_alignmask = 0, |
574 | .max_keysize = DES_KEY_SIZE, | 579 | .cra_module = THIS_MODULE, |
575 | .ivsize = DES_BLOCK_SIZE, | 580 | .cra_init = mv_cesa_skcipher_cra_init, |
576 | .setkey = mv_cesa_des_setkey, | 581 | .cra_exit = mv_cesa_skcipher_cra_exit, |
577 | .encrypt = mv_cesa_cbc_des_encrypt, | ||
578 | .decrypt = mv_cesa_cbc_des_decrypt, | ||
579 | }, | ||
580 | }, | 582 | }, |
581 | }; | 583 | }; |
582 | 584 | ||
583 | static int mv_cesa_des3_op(struct ablkcipher_request *req, | 585 | static int mv_cesa_des3_op(struct skcipher_request *req, |
584 | struct mv_cesa_op_ctx *tmpl) | 586 | struct mv_cesa_op_ctx *tmpl) |
585 | { | 587 | { |
586 | struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 588 | struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req, | |||
590 | 592 | ||
591 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); | 593 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); |
592 | 594 | ||
593 | return mv_cesa_ablkcipher_queue_req(req, tmpl); | 595 | return mv_cesa_skcipher_queue_req(req, tmpl); |
594 | } | 596 | } |
595 | 597 | ||
596 | static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) | 598 | static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req) |
597 | { | 599 | { |
598 | struct mv_cesa_op_ctx tmpl; | 600 | struct mv_cesa_op_ctx tmpl; |
599 | 601 | ||
@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) | |||
605 | return mv_cesa_des3_op(req, &tmpl); | 607 | return mv_cesa_des3_op(req, &tmpl); |
606 | } | 608 | } |
607 | 609 | ||
608 | static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) | 610 | static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req) |
609 | { | 611 | { |
610 | struct mv_cesa_op_ctx tmpl; | 612 | struct mv_cesa_op_ctx tmpl; |
611 | 613 | ||
@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) | |||
617 | return mv_cesa_des3_op(req, &tmpl); | 619 | return mv_cesa_des3_op(req, &tmpl); |
618 | } | 620 | } |
619 | 621 | ||
620 | struct crypto_alg mv_cesa_ecb_des3_ede_alg = { | 622 | struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { |
621 | .cra_name = "ecb(des3_ede)", | 623 | .setkey = mv_cesa_des3_ede_setkey, |
622 | .cra_driver_name = "mv-ecb-des3-ede", | 624 | .encrypt = mv_cesa_ecb_des3_ede_encrypt, |
623 | .cra_priority = 300, | 625 | .decrypt = mv_cesa_ecb_des3_ede_decrypt, |
624 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 626 | .min_keysize = DES3_EDE_KEY_SIZE, |
625 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 627 | .max_keysize = DES3_EDE_KEY_SIZE, |
626 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 628 | .ivsize = DES3_EDE_BLOCK_SIZE, |
627 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), | 629 | .base = { |
628 | .cra_alignmask = 0, | 630 | .cra_name = "ecb(des3_ede)", |
629 | .cra_type = &crypto_ablkcipher_type, | 631 | .cra_driver_name = "mv-ecb-des3-ede", |
630 | .cra_module = THIS_MODULE, | 632 | .cra_priority = 300, |
631 | .cra_init = mv_cesa_ablkcipher_cra_init, | 633 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
632 | .cra_u = { | 634 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
633 | .ablkcipher = { | 635 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), |
634 | .min_keysize = DES3_EDE_KEY_SIZE, | 636 | .cra_alignmask = 0, |
635 | .max_keysize = DES3_EDE_KEY_SIZE, | 637 | .cra_module = THIS_MODULE, |
636 | .ivsize = DES3_EDE_BLOCK_SIZE, | 638 | .cra_init = mv_cesa_skcipher_cra_init, |
637 | .setkey = mv_cesa_des3_ede_setkey, | 639 | .cra_exit = mv_cesa_skcipher_cra_exit, |
638 | .encrypt = mv_cesa_ecb_des3_ede_encrypt, | ||
639 | .decrypt = mv_cesa_ecb_des3_ede_decrypt, | ||
640 | }, | ||
641 | }, | 640 | }, |
642 | }; | 641 | }; |
643 | 642 | ||
644 | static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req, | 643 | static int mv_cesa_cbc_des3_op(struct skcipher_request *req, |
645 | struct mv_cesa_op_ctx *tmpl) | 644 | struct mv_cesa_op_ctx *tmpl) |
646 | { | 645 | { |
647 | memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE); | 646 | memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE); |
648 | 647 | ||
649 | return mv_cesa_des3_op(req, tmpl); | 648 | return mv_cesa_des3_op(req, tmpl); |
650 | } | 649 | } |
651 | 650 | ||
652 | static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) | 651 | static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req) |
653 | { | 652 | { |
654 | struct mv_cesa_op_ctx tmpl; | 653 | struct mv_cesa_op_ctx tmpl; |
655 | 654 | ||
@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) | |||
661 | return mv_cesa_cbc_des3_op(req, &tmpl); | 660 | return mv_cesa_cbc_des3_op(req, &tmpl); |
662 | } | 661 | } |
663 | 662 | ||
664 | static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) | 663 | static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req) |
665 | { | 664 | { |
666 | struct mv_cesa_op_ctx tmpl; | 665 | struct mv_cesa_op_ctx tmpl; |
667 | 666 | ||
@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) | |||
673 | return mv_cesa_cbc_des3_op(req, &tmpl); | 672 | return mv_cesa_cbc_des3_op(req, &tmpl); |
674 | } | 673 | } |
675 | 674 | ||
676 | struct crypto_alg mv_cesa_cbc_des3_ede_alg = { | 675 | struct skcipher_alg mv_cesa_cbc_des3_ede_alg = { |
677 | .cra_name = "cbc(des3_ede)", | 676 | .setkey = mv_cesa_des3_ede_setkey, |
678 | .cra_driver_name = "mv-cbc-des3-ede", | 677 | .encrypt = mv_cesa_cbc_des3_ede_encrypt, |
679 | .cra_priority = 300, | 678 | .decrypt = mv_cesa_cbc_des3_ede_decrypt, |
680 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 679 | .min_keysize = DES3_EDE_KEY_SIZE, |
681 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 680 | .max_keysize = DES3_EDE_KEY_SIZE, |
682 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 681 | .ivsize = DES3_EDE_BLOCK_SIZE, |
683 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), | 682 | .base = { |
684 | .cra_alignmask = 0, | 683 | .cra_name = "cbc(des3_ede)", |
685 | .cra_type = &crypto_ablkcipher_type, | 684 | .cra_driver_name = "mv-cbc-des3-ede", |
686 | .cra_module = THIS_MODULE, | 685 | .cra_priority = 300, |
687 | .cra_init = mv_cesa_ablkcipher_cra_init, | 686 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
688 | .cra_u = { | 687 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
689 | .ablkcipher = { | 688 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), |
690 | .min_keysize = DES3_EDE_KEY_SIZE, | 689 | .cra_alignmask = 0, |
691 | .max_keysize = DES3_EDE_KEY_SIZE, | 690 | .cra_module = THIS_MODULE, |
692 | .ivsize = DES3_EDE_BLOCK_SIZE, | 691 | .cra_init = mv_cesa_skcipher_cra_init, |
693 | .setkey = mv_cesa_des3_ede_setkey, | 692 | .cra_exit = mv_cesa_skcipher_cra_exit, |
694 | .encrypt = mv_cesa_cbc_des3_ede_encrypt, | ||
695 | .decrypt = mv_cesa_cbc_des3_ede_decrypt, | ||
696 | }, | ||
697 | }, | 693 | }, |
698 | }; | 694 | }; |
699 | 695 | ||
700 | static int mv_cesa_aes_op(struct ablkcipher_request *req, | 696 | static int mv_cesa_aes_op(struct skcipher_request *req, |
701 | struct mv_cesa_op_ctx *tmpl) | 697 | struct mv_cesa_op_ctx *tmpl) |
702 | { | 698 | { |
703 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 699 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, | |||
724 | CESA_SA_DESC_CFG_CRYPTM_MSK | | 720 | CESA_SA_DESC_CFG_CRYPTM_MSK | |
725 | CESA_SA_DESC_CFG_AES_LEN_MSK); | 721 | CESA_SA_DESC_CFG_AES_LEN_MSK); |
726 | 722 | ||
727 | return mv_cesa_ablkcipher_queue_req(req, tmpl); | 723 | return mv_cesa_skcipher_queue_req(req, tmpl); |
728 | } | 724 | } |
729 | 725 | ||
730 | static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) | 726 | static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req) |
731 | { | 727 | { |
732 | struct mv_cesa_op_ctx tmpl; | 728 | struct mv_cesa_op_ctx tmpl; |
733 | 729 | ||
@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) | |||
738 | return mv_cesa_aes_op(req, &tmpl); | 734 | return mv_cesa_aes_op(req, &tmpl); |
739 | } | 735 | } |
740 | 736 | ||
741 | static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) | 737 | static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req) |
742 | { | 738 | { |
743 | struct mv_cesa_op_ctx tmpl; | 739 | struct mv_cesa_op_ctx tmpl; |
744 | 740 | ||
@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) | |||
749 | return mv_cesa_aes_op(req, &tmpl); | 745 | return mv_cesa_aes_op(req, &tmpl); |
750 | } | 746 | } |
751 | 747 | ||
752 | struct crypto_alg mv_cesa_ecb_aes_alg = { | 748 | struct skcipher_alg mv_cesa_ecb_aes_alg = { |
753 | .cra_name = "ecb(aes)", | 749 | .setkey = mv_cesa_aes_setkey, |
754 | .cra_driver_name = "mv-ecb-aes", | 750 | .encrypt = mv_cesa_ecb_aes_encrypt, |
755 | .cra_priority = 300, | 751 | .decrypt = mv_cesa_ecb_aes_decrypt, |
756 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 752 | .min_keysize = AES_MIN_KEY_SIZE, |
757 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 753 | .max_keysize = AES_MAX_KEY_SIZE, |
758 | .cra_blocksize = AES_BLOCK_SIZE, | 754 | .base = { |
759 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), | 755 | .cra_name = "ecb(aes)", |
760 | .cra_alignmask = 0, | 756 | .cra_driver_name = "mv-ecb-aes", |
761 | .cra_type = &crypto_ablkcipher_type, | 757 | .cra_priority = 300, |
762 | .cra_module = THIS_MODULE, | 758 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
763 | .cra_init = mv_cesa_ablkcipher_cra_init, | 759 | .cra_blocksize = AES_BLOCK_SIZE, |
764 | .cra_u = { | 760 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), |
765 | .ablkcipher = { | 761 | .cra_alignmask = 0, |
766 | .min_keysize = AES_MIN_KEY_SIZE, | 762 | .cra_module = THIS_MODULE, |
767 | .max_keysize = AES_MAX_KEY_SIZE, | 763 | .cra_init = mv_cesa_skcipher_cra_init, |
768 | .setkey = mv_cesa_aes_setkey, | 764 | .cra_exit = mv_cesa_skcipher_cra_exit, |
769 | .encrypt = mv_cesa_ecb_aes_encrypt, | ||
770 | .decrypt = mv_cesa_ecb_aes_decrypt, | ||
771 | }, | ||
772 | }, | 765 | }, |
773 | }; | 766 | }; |
774 | 767 | ||
775 | static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req, | 768 | static int mv_cesa_cbc_aes_op(struct skcipher_request *req, |
776 | struct mv_cesa_op_ctx *tmpl) | 769 | struct mv_cesa_op_ctx *tmpl) |
777 | { | 770 | { |
778 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, | 771 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, |
779 | CESA_SA_DESC_CFG_CRYPTCM_MSK); | 772 | CESA_SA_DESC_CFG_CRYPTCM_MSK); |
780 | memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE); | 773 | memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE); |
781 | 774 | ||
782 | return mv_cesa_aes_op(req, tmpl); | 775 | return mv_cesa_aes_op(req, tmpl); |
783 | } | 776 | } |
784 | 777 | ||
785 | static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) | 778 | static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req) |
786 | { | 779 | { |
787 | struct mv_cesa_op_ctx tmpl; | 780 | struct mv_cesa_op_ctx tmpl; |
788 | 781 | ||
@@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) | |||
791 | return mv_cesa_cbc_aes_op(req, &tmpl); | 784 | return mv_cesa_cbc_aes_op(req, &tmpl); |
792 | } | 785 | } |
793 | 786 | ||
794 | static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) | 787 | static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req) |
795 | { | 788 | { |
796 | struct mv_cesa_op_ctx tmpl; | 789 | struct mv_cesa_op_ctx tmpl; |
797 | 790 | ||
@@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) | |||
800 | return mv_cesa_cbc_aes_op(req, &tmpl); | 793 | return mv_cesa_cbc_aes_op(req, &tmpl); |
801 | } | 794 | } |
802 | 795 | ||
803 | struct crypto_alg mv_cesa_cbc_aes_alg = { | 796 | struct skcipher_alg mv_cesa_cbc_aes_alg = { |
804 | .cra_name = "cbc(aes)", | 797 | .setkey = mv_cesa_aes_setkey, |
805 | .cra_driver_name = "mv-cbc-aes", | 798 | .encrypt = mv_cesa_cbc_aes_encrypt, |
806 | .cra_priority = 300, | 799 | .decrypt = mv_cesa_cbc_aes_decrypt, |
807 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 800 | .min_keysize = AES_MIN_KEY_SIZE, |
808 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | 801 | .max_keysize = AES_MAX_KEY_SIZE, |
809 | .cra_blocksize = AES_BLOCK_SIZE, | 802 | .ivsize = AES_BLOCK_SIZE, |
810 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), | 803 | .base = { |
811 | .cra_alignmask = 0, | 804 | .cra_name = "cbc(aes)", |
812 | .cra_type = &crypto_ablkcipher_type, | 805 | .cra_driver_name = "mv-cbc-aes", |
813 | .cra_module = THIS_MODULE, | 806 | .cra_priority = 300, |
814 | .cra_init = mv_cesa_ablkcipher_cra_init, | 807 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, |
815 | .cra_u = { | 808 | .cra_blocksize = AES_BLOCK_SIZE, |
816 | .ablkcipher = { | 809 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), |
817 | .min_keysize = AES_MIN_KEY_SIZE, | 810 | .cra_alignmask = 0, |
818 | .max_keysize = AES_MAX_KEY_SIZE, | 811 | .cra_module = THIS_MODULE, |
819 | .ivsize = AES_BLOCK_SIZE, | 812 | .cra_init = mv_cesa_skcipher_cra_init, |
820 | .setkey = mv_cesa_aes_setkey, | 813 | .cra_exit = mv_cesa_skcipher_cra_exit, |
821 | .encrypt = mv_cesa_cbc_aes_encrypt, | ||
822 | .decrypt = mv_cesa_cbc_aes_decrypt, | ||
823 | }, | ||
824 | }, | 814 | }, |
825 | }; | 815 | }; |
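
The cipher.c conversion above follows one mechanical recipe: ablkcipher_request/crypto_ablkcipher become skcipher_request/crypto_skcipher, req->nbytes and req->info become req->cryptlen and req->iv, the request context size moves from tfm->crt_ablkcipher.reqsize to crypto_skcipher_set_reqsize(), and each crypto_alg carrying a .cra_u.ablkcipher union is rewritten as a struct skcipher_alg whose generic fields sit in .base (CRYPTO_ALG_TYPE_ABLKCIPHER and .cra_type disappear because crypto_register_skcipher() supplies the type). A minimal sketch of the same pattern for a hypothetical driver — the my_* names are illustrative, not from this patch:

    #include <crypto/internal/skcipher.h>
    #include <crypto/aes.h>
    #include <linux/module.h>

    struct my_tfm_ctx { u8 key[AES_MAX_KEY_SIZE]; };  /* per-tfm state */
    struct my_req_ctx { int status; };                /* per-request state */

    static int my_cra_init(struct crypto_tfm *tfm)
    {
            /* reqsize is set through the skcipher API, not crt_ablkcipher */
            crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                        sizeof(struct my_req_ctx));
            return 0;
    }

    static int my_setkey(struct crypto_skcipher *cipher, const u8 *key,
                         unsigned int len)
    {
            return 0;       /* driver-specific key handling elided */
    }

    static int my_crypt(struct skcipher_request *req)
    {
            /* req->cryptlen and req->iv replace the old nbytes/info fields */
            return 0;
    }

    static struct skcipher_alg my_cbc_aes_alg = {
            .setkey         = my_setkey,
            .encrypt        = my_crypt,
            .decrypt        = my_crypt,
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize         = AES_BLOCK_SIZE,
            .base = {
                    .cra_name        = "cbc(aes)",
                    .cra_driver_name = "my-cbc-aes",
                    .cra_priority    = 300,
                    .cra_flags       = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                       CRYPTO_ALG_ASYNC,
                    .cra_blocksize   = AES_BLOCK_SIZE,
                    .cra_ctxsize     = sizeof(struct my_tfm_ctx),
                    .cra_module      = THIS_MODULE,
                    .cra_init        = my_cra_init,
            },
    };
    /* registered with crypto_register_skcipher(&my_cbc_aes_alg)
     * instead of crypto_register_alg() */
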
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c index c76375ff376d..d0ef171c18df 100644 --- a/drivers/crypto/marvell/tdma.c +++ b/drivers/crypto/marvell/tdma.c | |||
@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags) | |||
304 | struct mv_cesa_tdma_desc *tdma; | 304 | struct mv_cesa_tdma_desc *tdma; |
305 | 305 | ||
306 | tdma = mv_cesa_dma_add_desc(chain, flags); | 306 | tdma = mv_cesa_dma_add_desc(chain, flags); |
307 | if (IS_ERR(tdma)) | 307 | return PTR_ERR_OR_ZERO(tdma); |
308 | return PTR_ERR(tdma); | ||
309 | |||
310 | return 0; | ||
311 | } | 308 | } |
312 | 309 | ||
313 | int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags) | 310 | int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags) |
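
The tdma.c hunk is a pure simplification: the IS_ERR()/PTR_ERR()/return-0 tail collapses into PTR_ERR_OR_ZERO() from <linux/err.h>, which evaluates to PTR_ERR(ptr) when IS_ERR(ptr) is true and to 0 otherwise. Equivalent before/after, as a sketch:

    #include <linux/err.h>

    /* before: three statements */
    if (IS_ERR(tdma))
            return PTR_ERR(tdma);
    return 0;

    /* after: one statement, identical behavior */
    return PTR_ERR_OR_ZERO(tdma);
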
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 9e845e866dec..c2058cf59f57 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/aes.h> | 15 | #include <crypto/aes.h> |
16 | #include <crypto/gcm.h> | ||
16 | #include "mtk-platform.h" | 17 | #include "mtk-platform.h" |
17 | 18 | ||
18 | #define AES_QUEUE_SIZE 512 | 19 | #define AES_QUEUE_SIZE 512 |
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx { | |||
137 | struct crypto_skcipher *ctr; | 138 | struct crypto_skcipher *ctr; |
138 | }; | 139 | }; |
139 | 140 | ||
140 | struct mtk_aes_gcm_setkey_result { | ||
141 | int err; | ||
142 | struct completion completion; | ||
143 | }; | ||
144 | |||
145 | struct mtk_aes_drv { | 141 | struct mtk_aes_drv { |
146 | struct list_head dev_list; | 142 | struct list_head dev_list; |
147 | /* Device list lock */ | 143 | /* Device list lock */ |
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) | |||
928 | static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) | 924 | static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) |
929 | { | 925 | { |
930 | struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 926 | struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
927 | struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); | ||
931 | struct mtk_aes_reqctx *rctx = aead_request_ctx(req); | 928 | struct mtk_aes_reqctx *rctx = aead_request_ctx(req); |
932 | 929 | ||
930 | /* Empty messages are not supported yet */ | ||
931 | if (!gctx->textlen && !req->assoclen) | ||
932 | return -EINVAL; | ||
933 | |||
933 | rctx->mode = AES_FLAGS_GCM | mode; | 934 | rctx->mode = AES_FLAGS_GCM | mode; |
934 | 935 | ||
935 | return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), | 936 | return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), |
936 | &req->base); | 937 | &req->base); |
937 | } | 938 | } |
938 | 939 | ||
939 | static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err) | ||
940 | { | ||
941 | struct mtk_aes_gcm_setkey_result *result = req->data; | ||
942 | |||
943 | if (err == -EINPROGRESS) | ||
944 | return; | ||
945 | |||
946 | result->err = err; | ||
947 | complete(&result->completion); | ||
948 | } | ||
949 | |||
950 | /* | 940 | /* |
951 | * Because of the hardware limitation, we need to pre-calculate key(H) | 941 | * Because of the hardware limitation, we need to pre-calculate key(H) |
952 | * for the GHASH operation. The result of the encryption operation | 942 | * for the GHASH operation. The result of the encryption operation |
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
962 | u32 hash[4]; | 952 | u32 hash[4]; |
963 | u8 iv[8]; | 953 | u8 iv[8]; |
964 | 954 | ||
965 | struct mtk_aes_gcm_setkey_result result; | 955 | struct crypto_wait wait; |
966 | 956 | ||
967 | struct scatterlist sg[1]; | 957 | struct scatterlist sg[1]; |
968 | struct skcipher_request req; | 958 | struct skcipher_request req; |
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
1002 | if (!data) | 992 | if (!data) |
1003 | return -ENOMEM; | 993 | return -ENOMEM; |
1004 | 994 | ||
1005 | init_completion(&data->result.completion); | 995 | crypto_init_wait(&data->wait); |
1006 | sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); | 996 | sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); |
1007 | skcipher_request_set_tfm(&data->req, ctr); | 997 | skcipher_request_set_tfm(&data->req, ctr); |
1008 | skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | | 998 | skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | |
1009 | CRYPTO_TFM_REQ_MAY_BACKLOG, | 999 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
1010 | mtk_gcm_setkey_done, &data->result); | 1000 | crypto_req_done, &data->wait); |
1011 | skcipher_request_set_crypt(&data->req, data->sg, data->sg, | 1001 | skcipher_request_set_crypt(&data->req, data->sg, data->sg, |
1012 | AES_BLOCK_SIZE, data->iv); | 1002 | AES_BLOCK_SIZE, data->iv); |
1013 | 1003 | ||
1014 | err = crypto_skcipher_encrypt(&data->req); | 1004 | err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), |
1015 | if (err == -EINPROGRESS || err == -EBUSY) { | 1005 | &data->wait); |
1016 | err = wait_for_completion_interruptible( | ||
1017 | &data->result.completion); | ||
1018 | if (!err) | ||
1019 | err = data->result.err; | ||
1020 | } | ||
1021 | if (err) | 1006 | if (err) |
1022 | goto out; | 1007 | goto out; |
1023 | 1008 | ||
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = { | |||
1098 | .decrypt = mtk_aes_gcm_decrypt, | 1083 | .decrypt = mtk_aes_gcm_decrypt, |
1099 | .init = mtk_aes_gcm_init, | 1084 | .init = mtk_aes_gcm_init, |
1100 | .exit = mtk_aes_gcm_exit, | 1085 | .exit = mtk_aes_gcm_exit, |
1101 | .ivsize = 12, | 1086 | .ivsize = GCM_AES_IV_SIZE, |
1102 | .maxauthsize = AES_BLOCK_SIZE, | 1087 | .maxauthsize = AES_BLOCK_SIZE, |
1103 | 1088 | ||
1104 | .base = { | 1089 | .base = { |
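
The mtk-aes.c hunks apply the recurring pattern of this series: the driver-private completion struct and hand-rolled callback (mtk_aes_gcm_setkey_result / mtk_gcm_setkey_done above) are replaced by struct crypto_wait, crypto_req_done as the request callback, and crypto_wait_req() around the submission. A minimal sketch of the idiom, assuming a caller that already owns a skcipher tfm and request (my_encrypt_sync is an illustrative name, not from this patch):

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>

    static int my_encrypt_sync(struct skcipher_request *req)
    {
            struct crypto_wait wait;

            crypto_init_wait(&wait);
            skcipher_request_set_callback(req,
                                          CRYPTO_TFM_REQ_MAY_SLEEP |
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);

            /* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and
             * returns the request's final status */
            return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }

The same header also provides DECLARE_CRYPTO_WAIT() for declaration-time initialization of the wait object.
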
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c deleted file mode 100644 index 0eb2706f23c8..000000000000 --- a/drivers/crypto/mv_cesa.c +++ /dev/null | |||
@@ -1,1216 +0,0 @@ | |||
1 | /* | ||
2 | * Support for Marvell's crypto engine which can be found on some Orion5X | ||
3 | * boards. | ||
4 | * | ||
5 | * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
6 | * License: GPLv2 | ||
7 | * | ||
8 | */ | ||
9 | #include <crypto/aes.h> | ||
10 | #include <crypto/algapi.h> | ||
11 | #include <linux/crypto.h> | ||
12 | #include <linux/genalloc.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/kthread.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/scatterlist.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <crypto/hmac.h> | ||
22 | #include <crypto/internal/hash.h> | ||
23 | #include <crypto/sha.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_platform.h> | ||
26 | #include <linux/of_irq.h> | ||
27 | |||
28 | #include "mv_cesa.h" | ||
29 | |||
30 | #define MV_CESA "MV-CESA:" | ||
31 | #define MAX_HW_HASH_SIZE 0xFFFF | ||
32 | #define MV_CESA_EXPIRE 500 /* msec */ | ||
33 | |||
34 | #define MV_CESA_DEFAULT_SRAM_SIZE 2048 | ||
35 | |||
36 | /* | ||
37 | * STM: | ||
38 | * /---------------------------------------\ | ||
39 | * | | request complete | ||
40 | * \./ | | ||
41 | * IDLE -> new request -> BUSY -> done -> DEQUEUE | ||
42 | * /°\ | | ||
43 | * | | more scatter entries | ||
44 | * \________________/ | ||
45 | */ | ||
46 | enum engine_status { | ||
47 | ENGINE_IDLE, | ||
48 | ENGINE_BUSY, | ||
49 | ENGINE_W_DEQUEUE, | ||
50 | }; | ||
51 | |||
52 | /** | ||
53 | * struct req_progress - used for every crypt request | ||
54 | * @src_sg_it: sg iterator for src | ||
55 | * @dst_sg_it: sg iterator for dst | ||
56 | * @sg_src_left: bytes left in src to process (scatter list) | ||
57 | * @src_start: offset to add to src start position (scatter list) | ||
58 | * @crypt_len: length of current hw crypt/hash process | ||
59 | * @hw_nbytes: total bytes to process in hw for this request | ||
60 | * @copy_back: whether to copy data back (crypt) or not (hash) | ||
61 | * @sg_dst_left: bytes left dst to process in this scatter list | ||
62 | * @dst_start: offset to add to dst start position (scatter list) | ||
63 | * @hw_processed_bytes: number of bytes processed by hw (request). | ||
64 | * | ||
65 | * sg helpers are used to iterate over the scatterlist. Since the size of the | ||
66 | * SRAM may be less than the scatter size, this struct is used to keep | ||
67 | * track of progress within the current scatterlist. | ||
68 | */ | ||
69 | struct req_progress { | ||
70 | struct sg_mapping_iter src_sg_it; | ||
71 | struct sg_mapping_iter dst_sg_it; | ||
72 | void (*complete) (void); | ||
73 | void (*process) (int is_first); | ||
74 | |||
75 | /* src mostly */ | ||
76 | int sg_src_left; | ||
77 | int src_start; | ||
78 | int crypt_len; | ||
79 | int hw_nbytes; | ||
80 | /* dst mostly */ | ||
81 | int copy_back; | ||
82 | int sg_dst_left; | ||
83 | int dst_start; | ||
84 | int hw_processed_bytes; | ||
85 | }; | ||
86 | |||
87 | struct crypto_priv { | ||
88 | void __iomem *reg; | ||
89 | void __iomem *sram; | ||
90 | struct gen_pool *sram_pool; | ||
91 | dma_addr_t sram_dma; | ||
92 | int irq; | ||
93 | struct clk *clk; | ||
94 | struct task_struct *queue_th; | ||
95 | |||
96 | /* the lock protects queue and eng_st */ | ||
97 | spinlock_t lock; | ||
98 | struct crypto_queue queue; | ||
99 | enum engine_status eng_st; | ||
100 | struct timer_list completion_timer; | ||
101 | struct crypto_async_request *cur_req; | ||
102 | struct req_progress p; | ||
103 | int max_req_size; | ||
104 | int sram_size; | ||
105 | int has_sha1; | ||
106 | int has_hmac_sha1; | ||
107 | }; | ||
108 | |||
109 | static struct crypto_priv *cpg; | ||
110 | |||
111 | struct mv_ctx { | ||
112 | u8 aes_enc_key[AES_KEY_LEN]; | ||
113 | u32 aes_dec_key[8]; | ||
114 | int key_len; | ||
115 | u32 need_calc_aes_dkey; | ||
116 | }; | ||
117 | |||
118 | enum crypto_op { | ||
119 | COP_AES_ECB, | ||
120 | COP_AES_CBC, | ||
121 | }; | ||
122 | |||
123 | struct mv_req_ctx { | ||
124 | enum crypto_op op; | ||
125 | int decrypt; | ||
126 | }; | ||
127 | |||
128 | enum hash_op { | ||
129 | COP_SHA1, | ||
130 | COP_HMAC_SHA1 | ||
131 | }; | ||
132 | |||
133 | struct mv_tfm_hash_ctx { | ||
134 | struct crypto_shash *fallback; | ||
135 | struct crypto_shash *base_hash; | ||
136 | u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; | ||
137 | int count_add; | ||
138 | enum hash_op op; | ||
139 | }; | ||
140 | |||
141 | struct mv_req_hash_ctx { | ||
142 | u64 count; | ||
143 | u32 state[SHA1_DIGEST_SIZE / 4]; | ||
144 | u8 buffer[SHA1_BLOCK_SIZE]; | ||
145 | int first_hash; /* marks that we don't have previous state */ | ||
146 | int last_chunk; /* marks that this is the 'final' request */ | ||
147 | int extra_bytes; /* unprocessed bytes in buffer */ | ||
148 | enum hash_op op; | ||
149 | int count_add; | ||
150 | }; | ||
151 | |||
152 | static void mv_completion_timer_callback(struct timer_list *unused) | ||
153 | { | ||
154 | int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; | ||
155 | |||
156 | printk(KERN_ERR MV_CESA | ||
157 | "completion timer expired (CESA %sactive), cleaning up.\n", | ||
158 | active ? "" : "in"); | ||
159 | |||
160 | del_timer(&cpg->completion_timer); | ||
161 | writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD); | ||
162 | while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC) | ||
163 | printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n", __func__); | ||
164 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
165 | wake_up_process(cpg->queue_th); | ||
166 | } | ||
167 | |||
168 | static void mv_setup_timer(void) | ||
169 | { | ||
170 | timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0); | ||
171 | mod_timer(&cpg->completion_timer, | ||
172 | jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); | ||
173 | } | ||
174 | |||
175 | static void compute_aes_dec_key(struct mv_ctx *ctx) | ||
176 | { | ||
177 | struct crypto_aes_ctx gen_aes_key; | ||
178 | int key_pos; | ||
179 | |||
180 | if (!ctx->need_calc_aes_dkey) | ||
181 | return; | ||
182 | |||
183 | crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); | ||
184 | |||
185 | key_pos = ctx->key_len + 24; | ||
186 | memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); | ||
187 | switch (ctx->key_len) { | ||
188 | case AES_KEYSIZE_256: | ||
189 | key_pos -= 2; | ||
190 | /* fall through */ | ||
191 | case AES_KEYSIZE_192: | ||
192 | key_pos -= 2; | ||
193 | memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], | ||
194 | 4 * 4); | ||
195 | break; | ||
196 | } | ||
197 | ctx->need_calc_aes_dkey = 0; | ||
198 | } | ||
199 | |||
200 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | ||
201 | unsigned int len) | ||
202 | { | ||
203 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
204 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | ||
205 | |||
206 | switch (len) { | ||
207 | case AES_KEYSIZE_128: | ||
208 | case AES_KEYSIZE_192: | ||
209 | case AES_KEYSIZE_256: | ||
210 | break; | ||
211 | default: | ||
212 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | ctx->key_len = len; | ||
216 | ctx->need_calc_aes_dkey = 1; | ||
217 | |||
218 | memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) | ||
223 | { | ||
224 | int ret; | ||
225 | void *sbuf; | ||
226 | int copy_len; | ||
227 | |||
228 | while (len) { | ||
229 | if (!p->sg_src_left) { | ||
230 | ret = sg_miter_next(&p->src_sg_it); | ||
231 | BUG_ON(!ret); | ||
232 | p->sg_src_left = p->src_sg_it.length; | ||
233 | p->src_start = 0; | ||
234 | } | ||
235 | |||
236 | sbuf = p->src_sg_it.addr + p->src_start; | ||
237 | |||
238 | copy_len = min(p->sg_src_left, len); | ||
239 | memcpy(dbuf, sbuf, copy_len); | ||
240 | |||
241 | p->src_start += copy_len; | ||
242 | p->sg_src_left -= copy_len; | ||
243 | |||
244 | len -= copy_len; | ||
245 | dbuf += copy_len; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | static void setup_data_in(void) | ||
250 | { | ||
251 | struct req_progress *p = &cpg->p; | ||
252 | int data_in_sram = | ||
253 | min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); | ||
254 | copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, | ||
255 | data_in_sram - p->crypt_len); | ||
256 | p->crypt_len = data_in_sram; | ||
257 | } | ||
258 | |||
259 | static void mv_process_current_q(int first_block) | ||
260 | { | ||
261 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); | ||
262 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
263 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
264 | struct sec_accel_config op; | ||
265 | |||
266 | switch (req_ctx->op) { | ||
267 | case COP_AES_ECB: | ||
268 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; | ||
269 | break; | ||
270 | case COP_AES_CBC: | ||
271 | default: | ||
272 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; | ||
273 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | | ||
274 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); | ||
275 | if (first_block) | ||
276 | memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); | ||
277 | break; | ||
278 | } | ||
279 | if (req_ctx->decrypt) { | ||
280 | op.config |= CFG_DIR_DEC; | ||
281 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, | ||
282 | AES_KEY_LEN); | ||
283 | } else { | ||
284 | op.config |= CFG_DIR_ENC; | ||
285 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, | ||
286 | AES_KEY_LEN); | ||
287 | } | ||
288 | |||
289 | switch (ctx->key_len) { | ||
290 | case AES_KEYSIZE_128: | ||
291 | op.config |= CFG_AES_LEN_128; | ||
292 | break; | ||
293 | case AES_KEYSIZE_192: | ||
294 | op.config |= CFG_AES_LEN_192; | ||
295 | break; | ||
296 | case AES_KEYSIZE_256: | ||
297 | op.config |= CFG_AES_LEN_256; | ||
298 | break; | ||
299 | } | ||
300 | op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | | ||
301 | ENC_P_DST(SRAM_DATA_OUT_START); | ||
302 | op.enc_key_p = SRAM_DATA_KEY_P; | ||
303 | |||
304 | setup_data_in(); | ||
305 | op.enc_len = cpg->p.crypt_len; | ||
306 | memcpy(cpg->sram + SRAM_CONFIG, &op, | ||
307 | sizeof(struct sec_accel_config)); | ||
308 | |||
309 | /* GO */ | ||
310 | mv_setup_timer(); | ||
311 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | ||
312 | } | ||
313 | |||
314 | static void mv_crypto_algo_completion(void) | ||
315 | { | ||
316 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); | ||
317 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
318 | |||
319 | sg_miter_stop(&cpg->p.src_sg_it); | ||
320 | sg_miter_stop(&cpg->p.dst_sg_it); | ||
321 | |||
322 | if (req_ctx->op != COP_AES_CBC) | ||
323 | return ; | ||
324 | |||
325 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | ||
326 | } | ||
327 | |||
328 | static void mv_process_hash_current(int first_block) | ||
329 | { | ||
330 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | ||
331 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
332 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | ||
333 | struct req_progress *p = &cpg->p; | ||
334 | struct sec_accel_config op = { 0 }; | ||
335 | int is_last; | ||
336 | |||
337 | switch (req_ctx->op) { | ||
338 | case COP_SHA1: | ||
339 | default: | ||
340 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; | ||
341 | break; | ||
342 | case COP_HMAC_SHA1: | ||
343 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; | ||
344 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, | ||
345 | tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | ||
346 | break; | ||
347 | } | ||
348 | |||
349 | op.mac_src_p = | ||
350 | MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) | ||
351 | req_ctx-> | ||
352 | count); | ||
353 | |||
354 | setup_data_in(); | ||
355 | |||
356 | op.mac_digest = | ||
357 | MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); | ||
358 | op.mac_iv = | ||
359 | MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | | ||
360 | MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); | ||
361 | |||
362 | is_last = req_ctx->last_chunk | ||
363 | && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) | ||
364 | && (req_ctx->count <= MAX_HW_HASH_SIZE); | ||
365 | if (req_ctx->first_hash) { | ||
366 | if (is_last) | ||
367 | op.config |= CFG_NOT_FRAG; | ||
368 | else | ||
369 | op.config |= CFG_FIRST_FRAG; | ||
370 | |||
371 | req_ctx->first_hash = 0; | ||
372 | } else { | ||
373 | if (is_last) | ||
374 | op.config |= CFG_LAST_FRAG; | ||
375 | else | ||
376 | op.config |= CFG_MID_FRAG; | ||
377 | |||
378 | if (first_block) { | ||
379 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | ||
380 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | ||
381 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | ||
382 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | ||
383 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | ||
388 | |||
389 | /* GO */ | ||
390 | mv_setup_timer(); | ||
391 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | ||
392 | } | ||
393 | |||
394 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | ||
395 | struct shash_desc *desc) | ||
396 | { | ||
397 | int i; | ||
398 | struct sha1_state shash_state; | ||
399 | |||
400 | shash_state.count = ctx->count + ctx->count_add; | ||
401 | for (i = 0; i < 5; i++) | ||
402 | shash_state.state[i] = ctx->state[i]; | ||
403 | memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); | ||
404 | return crypto_shash_import(desc, &shash_state); | ||
405 | } | ||
406 | |||
407 | static int mv_hash_final_fallback(struct ahash_request *req) | ||
408 | { | ||
409 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
410 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | ||
411 | SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback); | ||
412 | int rc; | ||
413 | |||
414 | shash->tfm = tfm_ctx->fallback; | ||
415 | shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
416 | if (unlikely(req_ctx->first_hash)) { | ||
417 | crypto_shash_init(shash); | ||
418 | crypto_shash_update(shash, req_ctx->buffer, | ||
419 | req_ctx->extra_bytes); | ||
420 | } else { | ||
421 | /* only SHA1 for now.... | ||
422 | */ | ||
423 | rc = mv_hash_import_sha1_ctx(req_ctx, shash); | ||
424 | if (rc) | ||
425 | goto out; | ||
426 | } | ||
427 | rc = crypto_shash_final(shash, req->result); | ||
428 | out: | ||
429 | return rc; | ||
430 | } | ||
431 | |||
432 | static void mv_save_digest_state(struct mv_req_hash_ctx *ctx) | ||
433 | { | ||
434 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
435 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
436 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
437 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
438 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
439 | } | ||
440 | |||
441 | static void mv_hash_algo_completion(void) | ||
442 | { | ||
443 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | ||
444 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
445 | |||
446 | if (ctx->extra_bytes) | ||
447 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); | ||
448 | sg_miter_stop(&cpg->p.src_sg_it); | ||
449 | |||
450 | if (likely(ctx->last_chunk)) { | ||
451 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { | ||
452 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | ||
453 | crypto_ahash_digestsize(crypto_ahash_reqtfm | ||
454 | (req))); | ||
455 | } else { | ||
456 | mv_save_digest_state(ctx); | ||
457 | mv_hash_final_fallback(req); | ||
458 | } | ||
459 | } else { | ||
460 | mv_save_digest_state(ctx); | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void dequeue_complete_req(void) | ||
465 | { | ||
466 | struct crypto_async_request *req = cpg->cur_req; | ||
467 | void *buf; | ||
468 | int ret; | ||
469 | cpg->p.hw_processed_bytes += cpg->p.crypt_len; | ||
470 | if (cpg->p.copy_back) { | ||
471 | int need_copy_len = cpg->p.crypt_len; | ||
472 | int sram_offset = 0; | ||
473 | do { | ||
474 | int dst_copy; | ||
475 | |||
476 | if (!cpg->p.sg_dst_left) { | ||
477 | ret = sg_miter_next(&cpg->p.dst_sg_it); | ||
478 | BUG_ON(!ret); | ||
479 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | ||
480 | cpg->p.dst_start = 0; | ||
481 | } | ||
482 | |||
483 | buf = cpg->p.dst_sg_it.addr; | ||
484 | buf += cpg->p.dst_start; | ||
485 | |||
486 | dst_copy = min(need_copy_len, cpg->p.sg_dst_left); | ||
487 | |||
488 | memcpy(buf, | ||
489 | cpg->sram + SRAM_DATA_OUT_START + sram_offset, | ||
490 | dst_copy); | ||
491 | sram_offset += dst_copy; | ||
492 | cpg->p.sg_dst_left -= dst_copy; | ||
493 | need_copy_len -= dst_copy; | ||
494 | cpg->p.dst_start += dst_copy; | ||
495 | } while (need_copy_len > 0); | ||
496 | } | ||
497 | |||
498 | cpg->p.crypt_len = 0; | ||
499 | |||
500 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | ||
501 | if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { | ||
502 | /* process next scatter list entry */ | ||
503 | cpg->eng_st = ENGINE_BUSY; | ||
504 | cpg->p.process(0); | ||
505 | } else { | ||
506 | cpg->p.complete(); | ||
507 | cpg->eng_st = ENGINE_IDLE; | ||
508 | local_bh_disable(); | ||
509 | req->complete(req, 0); | ||
510 | local_bh_enable(); | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | ||
515 | { | ||
516 | int i = 0; | ||
517 | size_t cur_len; | ||
518 | |||
519 | while (sl) { | ||
520 | cur_len = sl[i].length; | ||
521 | ++i; | ||
522 | if (total_bytes > cur_len) | ||
523 | total_bytes -= cur_len; | ||
524 | else | ||
525 | break; | ||
526 | } | ||
527 | |||
528 | return i; | ||
529 | } | ||
530 | |||
531 | static void mv_start_new_crypt_req(struct ablkcipher_request *req) | ||
532 | { | ||
533 | struct req_progress *p = &cpg->p; | ||
534 | int num_sgs; | ||
535 | |||
536 | cpg->cur_req = &req->base; | ||
537 | memset(p, 0, sizeof(struct req_progress)); | ||
538 | p->hw_nbytes = req->nbytes; | ||
539 | p->complete = mv_crypto_algo_completion; | ||
540 | p->process = mv_process_current_q; | ||
541 | p->copy_back = 1; | ||
542 | |||
543 | num_sgs = count_sgs(req->src, req->nbytes); | ||
544 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | ||
545 | |||
546 | num_sgs = count_sgs(req->dst, req->nbytes); | ||
547 | sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | ||
548 | |||
549 | mv_process_current_q(1); | ||
550 | } | ||
551 | |||
552 | static void mv_start_new_hash_req(struct ahash_request *req) | ||
553 | { | ||
554 | struct req_progress *p = &cpg->p; | ||
555 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
556 | int num_sgs, hw_bytes, old_extra_bytes, rc; | ||
557 | cpg->cur_req = &req->base; | ||
558 | memset(p, 0, sizeof(struct req_progress)); | ||
559 | hw_bytes = req->nbytes + ctx->extra_bytes; | ||
560 | old_extra_bytes = ctx->extra_bytes; | ||
561 | |||
562 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; | ||
563 | if (ctx->extra_bytes != 0 | ||
564 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) | ||
565 | hw_bytes -= ctx->extra_bytes; | ||
566 | else | ||
567 | ctx->extra_bytes = 0; | ||
568 | |||
569 | num_sgs = count_sgs(req->src, req->nbytes); | ||
570 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | ||
571 | |||
572 | if (hw_bytes) { | ||
573 | p->hw_nbytes = hw_bytes; | ||
574 | p->complete = mv_hash_algo_completion; | ||
575 | p->process = mv_process_hash_current; | ||
576 | |||
577 | if (unlikely(old_extra_bytes)) { | ||
578 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
579 | old_extra_bytes); | ||
580 | p->crypt_len = old_extra_bytes; | ||
581 | } | ||
582 | |||
583 | mv_process_hash_current(1); | ||
584 | } else { | ||
585 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, | ||
586 | ctx->extra_bytes - old_extra_bytes); | ||
587 | sg_miter_stop(&p->src_sg_it); | ||
588 | if (ctx->last_chunk) | ||
589 | rc = mv_hash_final_fallback(req); | ||
590 | else | ||
591 | rc = 0; | ||
592 | cpg->eng_st = ENGINE_IDLE; | ||
593 | local_bh_disable(); | ||
594 | req->base.complete(&req->base, rc); | ||
595 | local_bh_enable(); | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static int queue_manag(void *data) | ||
600 | { | ||
601 | cpg->eng_st = ENGINE_IDLE; | ||
602 | do { | ||
603 | struct crypto_async_request *async_req = NULL; | ||
604 | struct crypto_async_request *backlog = NULL; | ||
605 | |||
606 | __set_current_state(TASK_INTERRUPTIBLE); | ||
607 | |||
608 | if (cpg->eng_st == ENGINE_W_DEQUEUE) | ||
609 | dequeue_complete_req(); | ||
610 | |||
611 | spin_lock_irq(&cpg->lock); | ||
612 | if (cpg->eng_st == ENGINE_IDLE) { | ||
613 | backlog = crypto_get_backlog(&cpg->queue); | ||
614 | async_req = crypto_dequeue_request(&cpg->queue); | ||
615 | if (async_req) { | ||
616 | BUG_ON(cpg->eng_st != ENGINE_IDLE); | ||
617 | cpg->eng_st = ENGINE_BUSY; | ||
618 | } | ||
619 | } | ||
620 | spin_unlock_irq(&cpg->lock); | ||
621 | |||
622 | if (backlog) { | ||
623 | backlog->complete(backlog, -EINPROGRESS); | ||
624 | backlog = NULL; | ||
625 | } | ||
626 | |||
627 | if (async_req) { | ||
628 | if (crypto_tfm_alg_type(async_req->tfm) != | ||
629 | CRYPTO_ALG_TYPE_AHASH) { | ||
630 | struct ablkcipher_request *req = | ||
631 | ablkcipher_request_cast(async_req); | ||
632 | mv_start_new_crypt_req(req); | ||
633 | } else { | ||
634 | struct ahash_request *req = | ||
635 | ahash_request_cast(async_req); | ||
636 | mv_start_new_hash_req(req); | ||
637 | } | ||
638 | async_req = NULL; | ||
639 | } | ||
640 | |||
641 | schedule(); | ||
642 | |||
643 | } while (!kthread_should_stop()); | ||
644 | return 0; | ||
645 | } | ||
646 | |||
647 | static int mv_handle_req(struct crypto_async_request *req) | ||
648 | { | ||
649 | unsigned long flags; | ||
650 | int ret; | ||
651 | |||
652 | spin_lock_irqsave(&cpg->lock, flags); | ||
653 | ret = crypto_enqueue_request(&cpg->queue, req); | ||
654 | spin_unlock_irqrestore(&cpg->lock, flags); | ||
655 | wake_up_process(cpg->queue_th); | ||
656 | return ret; | ||
657 | } | ||
658 | |||
659 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | ||
660 | { | ||
661 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
662 | |||
663 | req_ctx->op = COP_AES_ECB; | ||
664 | req_ctx->decrypt = 0; | ||
665 | |||
666 | return mv_handle_req(&req->base); | ||
667 | } | ||
668 | |||
669 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | ||
670 | { | ||
671 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
672 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
673 | |||
674 | req_ctx->op = COP_AES_ECB; | ||
675 | req_ctx->decrypt = 1; | ||
676 | |||
677 | compute_aes_dec_key(ctx); | ||
678 | return mv_handle_req(&req->base); | ||
679 | } | ||
680 | |||
681 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | ||
682 | { | ||
683 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
684 | |||
685 | req_ctx->op = COP_AES_CBC; | ||
686 | req_ctx->decrypt = 0; | ||
687 | |||
688 | return mv_handle_req(&req->base); | ||
689 | } | ||
690 | |||
691 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | ||
692 | { | ||
693 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
694 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
695 | |||
696 | req_ctx->op = COP_AES_CBC; | ||
697 | req_ctx->decrypt = 1; | ||
698 | |||
699 | compute_aes_dec_key(ctx); | ||
700 | return mv_handle_req(&req->base); | ||
701 | } | ||
702 | |||
703 | static int mv_cra_init(struct crypto_tfm *tfm) | ||
704 | { | ||
705 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); | ||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, | ||
710 | int is_last, unsigned int req_len, | ||
711 | int count_add) | ||
712 | { | ||
713 | memset(ctx, 0, sizeof(*ctx)); | ||
714 | ctx->op = op; | ||
715 | ctx->count = req_len; | ||
716 | ctx->first_hash = 1; | ||
717 | ctx->last_chunk = is_last; | ||
718 | ctx->count_add = count_add; | ||
719 | } | ||
720 | |||
721 | static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, | ||
722 | unsigned req_len) | ||
723 | { | ||
724 | ctx->last_chunk = is_last; | ||
725 | ctx->count += req_len; | ||
726 | } | ||
727 | |||
728 | static int mv_hash_init(struct ahash_request *req) | ||
729 | { | ||
730 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
731 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, | ||
732 | tfm_ctx->count_add); | ||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | static int mv_hash_update(struct ahash_request *req) | ||
737 | { | ||
738 | if (!req->nbytes) | ||
739 | return 0; | ||
740 | |||
741 | mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); | ||
742 | return mv_handle_req(&req->base); | ||
743 | } | ||
744 | |||
745 | static int mv_hash_final(struct ahash_request *req) | ||
746 | { | ||
747 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
748 | |||
749 | ahash_request_set_crypt(req, NULL, req->result, 0); | ||
750 | mv_update_hash_req_ctx(ctx, 1, 0); | ||
751 | return mv_handle_req(&req->base); | ||
752 | } | ||
753 | |||
754 | static int mv_hash_finup(struct ahash_request *req) | ||
755 | { | ||
756 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); | ||
757 | return mv_handle_req(&req->base); | ||
758 | } | ||
759 | |||
760 | static int mv_hash_digest(struct ahash_request *req) | ||
761 | { | ||
762 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
763 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, | ||
764 | req->nbytes, tfm_ctx->count_add); | ||
765 | return mv_handle_req(&req->base); | ||
766 | } | ||
767 | |||
768 | static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, | ||
769 | const void *ostate) | ||
770 | { | ||
771 | const struct sha1_state *isha1_state = istate, *osha1_state = ostate; | ||
772 | int i; | ||
773 | for (i = 0; i < 5; i++) { | ||
774 | ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); | ||
775 | ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); | ||
776 | } | ||
777 | } | ||
778 | |||
779 | static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, | ||
780 | unsigned int keylen) | ||
781 | { | ||
782 | int rc; | ||
783 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
784 | int bs, ds, ss; | ||
785 | |||
786 | if (!ctx->base_hash) | ||
787 | return 0; | ||
788 | |||
789 | rc = crypto_shash_setkey(ctx->fallback, key, keylen); | ||
790 | if (rc) | ||
791 | return rc; | ||
792 | |||
793 | /* No obvious way to extract the ipad/opad from the fallback tfm, | ||
794 | so this basically copies code from the hmac module */ | ||
795 | bs = crypto_shash_blocksize(ctx->base_hash); | ||
796 | ds = crypto_shash_digestsize(ctx->base_hash); | ||
797 | ss = crypto_shash_statesize(ctx->base_hash); | ||
798 | |||
799 | { | ||
800 | SHASH_DESC_ON_STACK(shash, ctx->base_hash); | ||
801 | |||
802 | unsigned int i; | ||
803 | char ipad[ss]; | ||
804 | char opad[ss]; | ||
805 | |||
806 | shash->tfm = ctx->base_hash; | ||
807 | shash->flags = crypto_shash_get_flags(ctx->base_hash) & | ||
808 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
809 | |||
810 | if (keylen > bs) { | ||
811 | int err; | ||
812 | |||
813 | err = | ||
814 | crypto_shash_digest(shash, key, keylen, ipad); | ||
815 | if (err) | ||
816 | return err; | ||
817 | |||
818 | keylen = ds; | ||
819 | } else | ||
820 | memcpy(ipad, key, keylen); | ||
821 | |||
822 | memset(ipad + keylen, 0, bs - keylen); | ||
823 | memcpy(opad, ipad, bs); | ||
824 | |||
825 | for (i = 0; i < bs; i++) { | ||
826 | ipad[i] ^= HMAC_IPAD_VALUE; | ||
827 | opad[i] ^= HMAC_OPAD_VALUE; | ||
828 | } | ||
829 | |||
830 | rc = crypto_shash_init(shash) ? : | ||
831 | crypto_shash_update(shash, ipad, bs) ? : | ||
832 | crypto_shash_export(shash, ipad) ? : | ||
833 | crypto_shash_init(shash) ? : | ||
834 | crypto_shash_update(shash, opad, bs) ? : | ||
835 | crypto_shash_export(shash, opad); | ||
836 | |||
837 | if (rc == 0) | ||
838 | mv_hash_init_ivs(ctx, ipad, opad); | ||
839 | |||
840 | return rc; | ||
841 | } | ||
842 | } | ||
843 | |||
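For orientation, what mv_hash_setkey() precomputes is the standard HMAC key schedule (RFC 2104): run one block of each padded key through the hash, export the partial states, and let the engine resume from them as inner/outer IVs. Schematically:

    HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))

    where K' = H(K) if len(K) > blocksize, else K zero-padded to blocksize;
    ipad is the byte 0x36 and opad the byte 0x5c, repeated blocksize times.

The two crypto_shash_export() calls above capture the hash's internal state after exactly one block of K'^ipad and K'^opad, which mv_hash_init_ivs() then loads as the hardware's inner and outer IVs.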
844 | static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, | ||
845 | enum hash_op op, int count_add) | ||
846 | { | ||
847 | const char *fallback_driver_name = crypto_tfm_alg_name(tfm); | ||
848 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
849 | struct crypto_shash *fallback_tfm = NULL; | ||
850 | struct crypto_shash *base_hash = NULL; | ||
851 | int err = -ENOMEM; | ||
852 | |||
853 | ctx->op = op; | ||
854 | ctx->count_add = count_add; | ||
855 | |||
856 | /* Allocate a fallback and abort if that fails. */ | ||
857 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, | ||
858 | CRYPTO_ALG_NEED_FALLBACK); | ||
859 | if (IS_ERR(fallback_tfm)) { | ||
860 | printk(KERN_WARNING MV_CESA | ||
861 | "Fallback driver '%s' could not be loaded!\n", | ||
862 | fallback_driver_name); | ||
863 | err = PTR_ERR(fallback_tfm); | ||
864 | goto out; | ||
865 | } | ||
866 | ctx->fallback = fallback_tfm; | ||
867 | |||
868 | if (base_hash_name) { | ||
869 | /* Allocate a hash to compute the ipad/opad of hmac. */ | ||
870 | base_hash = crypto_alloc_shash(base_hash_name, 0, | ||
871 | CRYPTO_ALG_NEED_FALLBACK); | ||
872 | if (IS_ERR(base_hash)) { | ||
873 | printk(KERN_WARNING MV_CESA | ||
874 | "Base driver '%s' could not be loaded!\n", | ||
875 | base_hash_name); | ||
876 | err = PTR_ERR(base_hash); | ||
877 | goto err_bad_base; | ||
878 | } | ||
879 | } | ||
880 | ctx->base_hash = base_hash; | ||
881 | |||
882 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
883 | sizeof(struct mv_req_hash_ctx) + | ||
884 | crypto_shash_descsize(ctx->fallback)); | ||
885 | return 0; | ||
886 | err_bad_base: | ||
887 | crypto_free_shash(fallback_tfm); | ||
888 | out: | ||
889 | return err; | ||
890 | } | ||
891 | |||
892 | static void mv_cra_hash_exit(struct crypto_tfm *tfm) | ||
893 | { | ||
894 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
895 | |||
896 | crypto_free_shash(ctx->fallback); | ||
897 | if (ctx->base_hash) | ||
898 | crypto_free_shash(ctx->base_hash); | ||
899 | } | ||
900 | |||
901 | static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) | ||
902 | { | ||
903 | return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); | ||
904 | } | ||
905 | |||
906 | static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) | ||
907 | { | ||
908 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); | ||
909 | } | ||
910 | |||
911 | static irqreturn_t crypto_int(int irq, void *priv) | ||
912 | { | ||
913 | u32 val; | ||
914 | |||
915 | val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); | ||
916 | if (!(val & SEC_INT_ACCEL0_DONE)) | ||
917 | return IRQ_NONE; | ||
918 | |||
919 | if (!del_timer(&cpg->completion_timer)) { | ||
920 | printk(KERN_WARNING MV_CESA | ||
921 | "got an interrupt but no pending timer?\n"); | ||
922 | } | ||
923 | val &= ~SEC_INT_ACCEL0_DONE; | ||
924 | writel(val, cpg->reg + FPGA_INT_STATUS); | ||
925 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
926 | BUG_ON(cpg->eng_st != ENGINE_BUSY); | ||
927 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
928 | wake_up_process(cpg->queue_th); | ||
929 | return IRQ_HANDLED; | ||
930 | } | ||
931 | |||
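crypto_int() above shows the defensive top-half shape for a potentially shared interrupt line: bail out with IRQ_NONE when the status register says this device raised nothing, ack what was handled, and push the real work to process context. A generic sketch; the DEMO_* register names and demo_dev struct are invented for illustration (and the ack uses write-one-to-clear semantics, unlike the mask-and-writeback above):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define DEMO_INT_STATUS	0x20		/* hypothetical offset */
#define DEMO_INT_DONE	BIT(0)		/* hypothetical bit */

struct demo_dev {			/* hypothetical */
	void __iomem *regs;
	struct task_struct *worker;
};

static irqreturn_t demo_irq(int irq, void *priv)
{
	struct demo_dev *dev = priv;
	u32 status = readl(dev->regs + DEMO_INT_STATUS);

	if (!(status & DEMO_INT_DONE))
		return IRQ_NONE;	/* not ours; let other handlers run */

	/* Ack only the source we handled. */
	writel(DEMO_INT_DONE, dev->regs + DEMO_INT_STATUS);

	/* Defer completion handling out of hard-IRQ context. */
	wake_up_process(dev->worker);
	return IRQ_HANDLED;
}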
932 | static struct crypto_alg mv_aes_alg_ecb = { | ||
933 | .cra_name = "ecb(aes)", | ||
934 | .cra_driver_name = "mv-ecb-aes", | ||
935 | .cra_priority = 300, | ||
936 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
937 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
938 | .cra_blocksize = 16, | ||
939 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
940 | .cra_alignmask = 0, | ||
941 | .cra_type = &crypto_ablkcipher_type, | ||
942 | .cra_module = THIS_MODULE, | ||
943 | .cra_init = mv_cra_init, | ||
944 | .cra_u = { | ||
945 | .ablkcipher = { | ||
946 | .min_keysize = AES_MIN_KEY_SIZE, | ||
947 | .max_keysize = AES_MAX_KEY_SIZE, | ||
948 | .setkey = mv_setkey_aes, | ||
949 | .encrypt = mv_enc_aes_ecb, | ||
950 | .decrypt = mv_dec_aes_ecb, | ||
951 | }, | ||
952 | }, | ||
953 | }; | ||
954 | |||
955 | static struct crypto_alg mv_aes_alg_cbc = { | ||
956 | .cra_name = "cbc(aes)", | ||
957 | .cra_driver_name = "mv-cbc-aes", | ||
958 | .cra_priority = 300, | ||
959 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
960 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
961 | .cra_blocksize = AES_BLOCK_SIZE, | ||
962 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
963 | .cra_alignmask = 0, | ||
964 | .cra_type = &crypto_ablkcipher_type, | ||
965 | .cra_module = THIS_MODULE, | ||
966 | .cra_init = mv_cra_init, | ||
967 | .cra_u = { | ||
968 | .ablkcipher = { | ||
969 | .ivsize = AES_BLOCK_SIZE, | ||
970 | .min_keysize = AES_MIN_KEY_SIZE, | ||
971 | .max_keysize = AES_MAX_KEY_SIZE, | ||
972 | .setkey = mv_setkey_aes, | ||
973 | .encrypt = mv_enc_aes_cbc, | ||
974 | .decrypt = mv_dec_aes_cbc, | ||
975 | }, | ||
976 | }, | ||
977 | }; | ||
978 | |||
979 | static struct ahash_alg mv_sha1_alg = { | ||
980 | .init = mv_hash_init, | ||
981 | .update = mv_hash_update, | ||
982 | .final = mv_hash_final, | ||
983 | .finup = mv_hash_finup, | ||
984 | .digest = mv_hash_digest, | ||
985 | .halg = { | ||
986 | .digestsize = SHA1_DIGEST_SIZE, | ||
987 | .base = { | ||
988 | .cra_name = "sha1", | ||
989 | .cra_driver_name = "mv-sha1", | ||
990 | .cra_priority = 300, | ||
991 | .cra_flags = | ||
992 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
993 | CRYPTO_ALG_NEED_FALLBACK, | ||
994 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
995 | .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), | ||
996 | .cra_init = mv_cra_hash_sha1_init, | ||
997 | .cra_exit = mv_cra_hash_exit, | ||
998 | .cra_module = THIS_MODULE, | ||
999 | } | ||
1000 | } | ||
1001 | }; | ||
1002 | |||
1003 | static struct ahash_alg mv_hmac_sha1_alg = { | ||
1004 | .init = mv_hash_init, | ||
1005 | .update = mv_hash_update, | ||
1006 | .final = mv_hash_final, | ||
1007 | .finup = mv_hash_finup, | ||
1008 | .digest = mv_hash_digest, | ||
1009 | .setkey = mv_hash_setkey, | ||
1010 | .halg = { | ||
1011 | .digestsize = SHA1_DIGEST_SIZE, | ||
1012 | .base = { | ||
1013 | .cra_name = "hmac(sha1)", | ||
1014 | .cra_driver_name = "mv-hmac-sha1", | ||
1015 | .cra_priority = 300, | ||
1016 | .cra_flags = | ||
1017 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
1018 | CRYPTO_ALG_NEED_FALLBACK, | ||
1019 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1020 | .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), | ||
1021 | .cra_init = mv_cra_hash_hmac_sha1_init, | ||
1022 | .cra_exit = mv_cra_hash_exit, | ||
1023 | .cra_module = THIS_MODULE, | ||
1024 | } | ||
1025 | } | ||
1026 | }; | ||
1027 | |||
1028 | static int mv_cesa_get_sram(struct platform_device *pdev, | ||
1029 | struct crypto_priv *cp) | ||
1030 | { | ||
1031 | struct resource *res; | ||
1032 | u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE; | ||
1033 | |||
1034 | of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size", | ||
1035 | &sram_size); | ||
1036 | |||
1037 | cp->sram_size = sram_size; | ||
1038 | cp->sram_pool = of_gen_pool_get(pdev->dev.of_node, | ||
1039 | "marvell,crypto-srams", 0); | ||
1040 | if (cp->sram_pool) { | ||
1041 | cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, | ||
1042 | &cp->sram_dma); | ||
1043 | if (cp->sram) | ||
1044 | return 0; | ||
1045 | |||
1046 | return -ENOMEM; | ||
1047 | } | ||
1048 | |||
1049 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
1050 | "sram"); | ||
1051 | if (!res || resource_size(res) < cp->sram_size) | ||
1052 | return -EINVAL; | ||
1053 | |||
1054 | cp->sram = devm_ioremap_resource(&pdev->dev, res); | ||
1055 | if (IS_ERR(cp->sram)) | ||
1056 | return PTR_ERR(cp->sram); | ||
1057 | |||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
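One detail worth calling out in mv_cesa_get_sram(): of_property_read_u32() leaves its output untouched on any failure, so presetting the default and ignoring the return value implements "DT value if present, default otherwise" in one line. Sketched below; the property name is made up:

#include <linux/of.h>

static u32 demo_fifo_depth(struct device_node *np)
{
	u32 depth = 64;		/* default, kept if the property is absent */

	/* Only writes 'depth' when "vendor,fifo-depth" exists and
	 * parses cleanly as a u32. */
	of_property_read_u32(np, "vendor,fifo-depth", &depth);
	return depth;
}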
1061 | static int mv_probe(struct platform_device *pdev) | ||
1062 | { | ||
1063 | struct crypto_priv *cp; | ||
1064 | struct resource *res; | ||
1065 | int irq; | ||
1066 | int ret; | ||
1067 | |||
1068 | if (cpg) { | ||
1069 | printk(KERN_ERR MV_CESA "Second crypto dev?\n"); | ||
1070 | return -EEXIST; | ||
1071 | } | ||
1072 | |||
1073 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
1074 | if (!res) | ||
1075 | return -ENXIO; | ||
1076 | |||
1077 | cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL); | ||
1078 | if (!cp) | ||
1079 | return -ENOMEM; | ||
1080 | |||
1081 | spin_lock_init(&cp->lock); | ||
1082 | crypto_init_queue(&cp->queue, 50); | ||
1083 | cp->reg = devm_ioremap_resource(&pdev->dev, res); | ||
1084 | if (IS_ERR(cp->reg)) { | ||
1085 | ret = PTR_ERR(cp->reg); | ||
1086 | goto err; | ||
1087 | } | ||
1088 | |||
1089 | ret = mv_cesa_get_sram(pdev, cp); | ||
1090 | if (ret) | ||
1091 | goto err; | ||
1092 | |||
1093 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | ||
1094 | |||
1095 | irq = platform_get_irq(pdev, 0); | ||
1096 | if (irq < 0) { | ||
1097 | ret = irq; | ||
1098 | goto err; | ||
1099 | } | ||
1100 | cp->irq = irq; | ||
1101 | |||
1102 | platform_set_drvdata(pdev, cp); | ||
1103 | cpg = cp; | ||
1104 | |||
1105 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | ||
1106 | if (IS_ERR(cp->queue_th)) { | ||
1107 | ret = PTR_ERR(cp->queue_th); | ||
1108 | goto err; | ||
1109 | } | ||
1110 | |||
1111 | ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), | ||
1112 | cp); | ||
1113 | if (ret) | ||
1114 | goto err_thread; | ||
1115 | |||
1116 | /* Not all platforms can gate the clock, so it is not | ||
1117 | an error if the clock does not exist. */ | ||
1118 | cp->clk = clk_get(&pdev->dev, NULL); | ||
1119 | if (!IS_ERR(cp->clk)) | ||
1120 | clk_prepare_enable(cp->clk); | ||
1121 | |||
1122 | writel(0, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
1123 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | ||
1124 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | ||
1125 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
1126 | |||
1127 | ret = crypto_register_alg(&mv_aes_alg_ecb); | ||
1128 | if (ret) { | ||
1129 | printk(KERN_WARNING MV_CESA | ||
1130 | "Could not register aes-ecb driver\n"); | ||
1131 | goto err_irq; | ||
1132 | } | ||
1133 | |||
1134 | ret = crypto_register_alg(&mv_aes_alg_cbc); | ||
1135 | if (ret) { | ||
1136 | printk(KERN_WARNING MV_CESA | ||
1137 | "Could not register aes-cbc driver\n"); | ||
1138 | goto err_unreg_ecb; | ||
1139 | } | ||
1140 | |||
1141 | ret = crypto_register_ahash(&mv_sha1_alg); | ||
1142 | if (ret == 0) | ||
1143 | cpg->has_sha1 = 1; | ||
1144 | else | ||
1145 | printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); | ||
1146 | |||
1147 | ret = crypto_register_ahash(&mv_hmac_sha1_alg); | ||
1148 | if (ret == 0) { | ||
1149 | cpg->has_hmac_sha1 = 1; | ||
1150 | } else { | ||
1151 | printk(KERN_WARNING MV_CESA | ||
1152 | "Could not register hmac-sha1 driver\n"); | ||
1153 | } | ||
1154 | |||
1155 | return 0; | ||
1156 | err_unreg_ecb: | ||
1157 | crypto_unregister_alg(&mv_aes_alg_ecb); | ||
1158 | err_irq: | ||
1159 | free_irq(irq, cp); | ||
1160 | if (!IS_ERR(cp->clk)) { | ||
1161 | clk_disable_unprepare(cp->clk); | ||
1162 | clk_put(cp->clk); | ||
1163 | } | ||
1164 | err_thread: | ||
1165 | kthread_stop(cp->queue_th); | ||
1166 | err: | ||
1167 | cpg = NULL; | ||
1168 | return ret; | ||
1169 | } | ||
1170 | |||
1171 | static int mv_remove(struct platform_device *pdev) | ||
1172 | { | ||
1173 | struct crypto_priv *cp = platform_get_drvdata(pdev); | ||
1174 | |||
1175 | crypto_unregister_alg(&mv_aes_alg_ecb); | ||
1176 | crypto_unregister_alg(&mv_aes_alg_cbc); | ||
1177 | if (cp->has_sha1) | ||
1178 | crypto_unregister_ahash(&mv_sha1_alg); | ||
1179 | if (cp->has_hmac_sha1) | ||
1180 | crypto_unregister_ahash(&mv_hmac_sha1_alg); | ||
1181 | kthread_stop(cp->queue_th); | ||
1182 | free_irq(cp->irq, cp); | ||
1183 | memset(cp->sram, 0, cp->sram_size); | ||
1184 | |||
1185 | if (!IS_ERR(cp->clk)) { | ||
1186 | clk_disable_unprepare(cp->clk); | ||
1187 | clk_put(cp->clk); | ||
1188 | } | ||
1189 | |||
1190 | cpg = NULL; | ||
1191 | return 0; | ||
1192 | } | ||
1193 | |||
1194 | static const struct of_device_id mv_cesa_of_match_table[] = { | ||
1195 | { .compatible = "marvell,orion-crypto", }, | ||
1196 | { .compatible = "marvell,kirkwood-crypto", }, | ||
1197 | { .compatible = "marvell,dove-crypto", }, | ||
1198 | {} | ||
1199 | }; | ||
1200 | MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); | ||
1201 | |||
1202 | static struct platform_driver marvell_crypto = { | ||
1203 | .probe = mv_probe, | ||
1204 | .remove = mv_remove, | ||
1205 | .driver = { | ||
1206 | .name = "mv_crypto", | ||
1207 | .of_match_table = mv_cesa_of_match_table, | ||
1208 | }, | ||
1209 | }; | ||
1210 | MODULE_ALIAS("platform:mv_crypto"); | ||
1211 | |||
1212 | module_platform_driver(marvell_crypto); | ||
1213 | |||
1214 | MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); | ||
1215 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | ||
1216 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h deleted file mode 100644 index 0be3f0aa4afd..000000000000 --- a/drivers/crypto/mv_cesa.h +++ /dev/null | |||
@@ -1,151 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef __MV_CRYPTO_H__ | ||
3 | #define __MV_CRYPTO_H__ | ||
4 | |||
5 | #define DIGEST_INITIAL_VAL_A 0xdd00 | ||
6 | #define DIGEST_INITIAL_VAL_B 0xdd04 | ||
7 | #define DIGEST_INITIAL_VAL_C 0xdd08 | ||
8 | #define DIGEST_INITIAL_VAL_D 0xdd0c | ||
9 | #define DIGEST_INITIAL_VAL_E 0xdd10 | ||
10 | #define DES_CMD_REG 0xdd58 | ||
11 | |||
12 | #define SEC_ACCEL_CMD 0xde00 | ||
13 | #define SEC_CMD_EN_SEC_ACCL0 (1 << 0) | ||
14 | #define SEC_CMD_EN_SEC_ACCL1 (1 << 1) | ||
15 | #define SEC_CMD_DISABLE_SEC (1 << 2) | ||
16 | |||
17 | #define SEC_ACCEL_DESC_P0 0xde04 | ||
18 | #define SEC_DESC_P0_PTR(x) (x) | ||
19 | |||
20 | #define SEC_ACCEL_DESC_P1 0xde14 | ||
21 | #define SEC_DESC_P1_PTR(x) (x) | ||
22 | |||
23 | #define SEC_ACCEL_CFG 0xde08 | ||
24 | #define SEC_CFG_STOP_DIG_ERR (1 << 0) | ||
25 | #define SEC_CFG_CH0_W_IDMA (1 << 7) | ||
26 | #define SEC_CFG_CH1_W_IDMA (1 << 8) | ||
27 | #define SEC_CFG_ACT_CH0_IDMA (1 << 9) | ||
28 | #define SEC_CFG_ACT_CH1_IDMA (1 << 10) | ||
29 | |||
30 | #define SEC_ACCEL_STATUS 0xde0c | ||
31 | #define SEC_ST_ACT_0 (1 << 0) | ||
32 | #define SEC_ST_ACT_1 (1 << 1) | ||
33 | |||
34 | /* | ||
35 | * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in | ||
36 | * Errata 4.12. It appears to have been part of an IRQ controller in the | ||
37 | * FPGA that someone forgot to remove when switching to the core and to | ||
38 | * SEC_ACCEL_INT_STATUS. | ||
39 | */ | ||
40 | #define FPGA_INT_STATUS 0xdd68 | ||
41 | #define SEC_ACCEL_INT_STATUS 0xde20 | ||
42 | #define SEC_INT_AUTH_DONE (1 << 0) | ||
43 | #define SEC_INT_DES_E_DONE (1 << 1) | ||
44 | #define SEC_INT_AES_E_DONE (1 << 2) | ||
45 | #define SEC_INT_AES_D_DONE (1 << 3) | ||
46 | #define SEC_INT_ENC_DONE (1 << 4) | ||
47 | #define SEC_INT_ACCEL0_DONE (1 << 5) | ||
48 | #define SEC_INT_ACCEL1_DONE (1 << 6) | ||
49 | #define SEC_INT_ACC0_IDMA_DONE (1 << 7) | ||
50 | #define SEC_INT_ACC1_IDMA_DONE (1 << 8) | ||
51 | |||
52 | #define SEC_ACCEL_INT_MASK 0xde24 | ||
53 | |||
54 | #define AES_KEY_LEN (8 * 4) | ||
55 | |||
56 | struct sec_accel_config { | ||
57 | |||
58 | u32 config; | ||
59 | #define CFG_OP_MAC_ONLY 0 | ||
60 | #define CFG_OP_CRYPT_ONLY 1 | ||
61 | #define CFG_OP_MAC_CRYPT 2 | ||
62 | #define CFG_OP_CRYPT_MAC 3 | ||
63 | #define CFG_MACM_MD5 (4 << 4) | ||
64 | #define CFG_MACM_SHA1 (5 << 4) | ||
65 | #define CFG_MACM_HMAC_MD5 (6 << 4) | ||
66 | #define CFG_MACM_HMAC_SHA1 (7 << 4) | ||
67 | #define CFG_ENCM_DES (1 << 8) | ||
68 | #define CFG_ENCM_3DES (2 << 8) | ||
69 | #define CFG_ENCM_AES (3 << 8) | ||
70 | #define CFG_DIR_ENC (0 << 12) | ||
71 | #define CFG_DIR_DEC (1 << 12) | ||
72 | #define CFG_ENC_MODE_ECB (0 << 16) | ||
73 | #define CFG_ENC_MODE_CBC (1 << 16) | ||
74 | #define CFG_3DES_EEE (0 << 20) | ||
75 | #define CFG_3DES_EDE (1 << 20) | ||
76 | #define CFG_AES_LEN_128 (0 << 24) | ||
77 | #define CFG_AES_LEN_192 (1 << 24) | ||
78 | #define CFG_AES_LEN_256 (2 << 24) | ||
79 | #define CFG_NOT_FRAG (0 << 30) | ||
80 | #define CFG_FIRST_FRAG (1 << 30) | ||
81 | #define CFG_LAST_FRAG (2 << 30) | ||
82 | #define CFG_MID_FRAG (3 << 30) | ||
83 | |||
84 | u32 enc_p; | ||
85 | #define ENC_P_SRC(x) (x) | ||
86 | #define ENC_P_DST(x) ((x) << 16) | ||
87 | |||
88 | u32 enc_len; | ||
89 | #define ENC_LEN(x) (x) | ||
90 | |||
91 | u32 enc_key_p; | ||
92 | #define ENC_KEY_P(x) (x) | ||
93 | |||
94 | u32 enc_iv; | ||
95 | #define ENC_IV_POINT(x) ((x) << 0) | ||
96 | #define ENC_IV_BUF_POINT(x) ((x) << 16) | ||
97 | |||
98 | u32 mac_src_p; | ||
99 | #define MAC_SRC_DATA_P(x) (x) | ||
100 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) | ||
101 | |||
102 | u32 mac_digest; | ||
103 | #define MAC_DIGEST_P(x) (x) | ||
104 | #define MAC_FRAG_LEN(x) ((x) << 16) | ||
105 | u32 mac_iv; | ||
106 | #define MAC_INNER_IV_P(x) (x) | ||
107 | #define MAC_OUTER_IV_P(x) ((x) << 16) | ||
108 | } __attribute__((packed)); | ||
109 | /* | ||
110 | * /-----------\ 0 | ||
111 | * | ACCEL CFG | 4 * 8 | ||
112 | * |-----------| 0x20 | ||
113 | * | CRYPT KEY | 8 * 4 | ||
114 | * |-----------| 0x40 | ||
115 | * | IV IN | 4 * 4 | ||
116 | * |-----------| 0x40 (inplace) | ||
117 | * | IV BUF | 4 * 4 | ||
118 | * |-----------| 0x80 | ||
119 | * | DATA IN | 16 * x (max ->max_req_size) | ||
120 | * |-----------| 0x80 (inplace operation) | ||
121 | * | DATA OUT | 16 * x (max ->max_req_size) | ||
122 | * \-----------/ SRAM size | ||
123 | */ | ||
124 | |||
125 | /* Hashing memory map: | ||
126 | * /-----------\ 0 | ||
127 | * | ACCEL CFG | 4 * 8 | ||
128 | * |-----------| 0x20 | ||
129 | * | Inner IV | 5 * 4 | ||
130 | * |-----------| 0x34 | ||
131 | * | Outer IV | 5 * 4 | ||
132 | * |-----------| 0x48 | ||
133 | * | Output BUF| 5 * 4 | ||
134 | * |-----------| 0x80 | ||
135 | * | DATA IN | 64 * x (max ->max_req_size) | ||
136 | * \-----------/ SRAM size | ||
137 | */ | ||
138 | #define SRAM_CONFIG 0x00 | ||
139 | #define SRAM_DATA_KEY_P 0x20 | ||
140 | #define SRAM_DATA_IV 0x40 | ||
141 | #define SRAM_DATA_IV_BUF 0x40 | ||
142 | #define SRAM_DATA_IN_START 0x80 | ||
143 | #define SRAM_DATA_OUT_START 0x80 | ||
144 | |||
145 | #define SRAM_HMAC_IV_IN 0x20 | ||
146 | #define SRAM_HMAC_IV_OUT 0x34 | ||
147 | #define SRAM_DIGEST_BUF 0x48 | ||
148 | |||
149 | #define SRAM_CFG_SPACE 0x80 | ||
150 | |||
151 | #endif | ||
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index a9fd8b9e86cd..48de52cf2ecc 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1962,10 +1962,8 @@ static struct n2_crypto *alloc_n2cp(void) | |||
1962 | 1962 | ||
1963 | static void free_n2cp(struct n2_crypto *np) | 1963 | static void free_n2cp(struct n2_crypto *np) |
1964 | { | 1964 | { |
1965 | if (np->cwq_info.ino_table) { | 1965 | kfree(np->cwq_info.ino_table); |
1966 | kfree(np->cwq_info.ino_table); | 1966 | np->cwq_info.ino_table = NULL; |
1967 | np->cwq_info.ino_table = NULL; | ||
1968 | } | ||
1969 | 1967 | ||
1970 | kfree(np); | 1968 | kfree(np); |
1971 | } | 1969 | } |
@@ -2079,10 +2077,8 @@ static struct n2_mau *alloc_ncp(void) | |||
2079 | 2077 | ||
2080 | static void free_ncp(struct n2_mau *mp) | 2078 | static void free_ncp(struct n2_mau *mp) |
2081 | { | 2079 | { |
2082 | if (mp->mau_info.ino_table) { | 2080 | kfree(mp->mau_info.ino_table); |
2083 | kfree(mp->mau_info.ino_table); | 2081 | mp->mau_info.ino_table = NULL; |
2084 | mp->mau_info.ino_table = NULL; | ||
2085 | } | ||
2086 | 2082 | ||
2087 | kfree(mp); | 2083 | kfree(mp); |
2088 | } | 2084 | } |
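The two free_n2cp()/free_ncp() hunks above lean on kfree() accepting NULL (it is a no-op), which makes a guard check before it dead weight. Reduced to a sketch with a hypothetical struct:

#include <linux/slab.h>

struct demo { void *table; };		/* hypothetical */

static void demo_free(struct demo *obj)
{
	/* kfree(NULL) is a no-op, so no 'if (obj->table)' guard is needed. */
	kfree(obj->table);
	obj->table = NULL;
	kfree(obj);
}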
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index cddc6d8b55d9..bf52cd1d7fca 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c | |||
@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev) | |||
1082 | return 0; | 1082 | return 0; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static struct vio_device_id nx842_vio_driver_ids[] = { | 1085 | static const struct vio_device_id nx842_vio_driver_ids[] = { |
1086 | {"ibm,compression-v1", "ibm,compression"}, | 1086 | {"ibm,compression-v1", "ibm,compression"}, |
1087 | {"", ""}, | 1087 | {"", ""}, |
1088 | }; | 1088 | }; |
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index abd465f479c4..a810596b97c2 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
23 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
24 | #include <crypto/algapi.h> | 24 | #include <crypto/algapi.h> |
25 | #include <crypto/gcm.h> | ||
25 | #include <crypto/scatterwalk.h> | 26 | #include <crypto/scatterwalk.h> |
26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
27 | #include <linux/types.h> | 28 | #include <linux/types.h> |
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req) | |||
433 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); | 434 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
434 | char *iv = rctx->iv; | 435 | char *iv = rctx->iv; |
435 | 436 | ||
436 | memcpy(iv, req->iv, 12); | 437 | memcpy(iv, req->iv, GCM_AES_IV_SIZE); |
437 | 438 | ||
438 | return gcm_aes_nx_crypt(req, 1, req->assoclen); | 439 | return gcm_aes_nx_crypt(req, 1, req->assoclen); |
439 | } | 440 | } |
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req) | |||
443 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); | 444 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
444 | char *iv = rctx->iv; | 445 | char *iv = rctx->iv; |
445 | 446 | ||
446 | memcpy(iv, req->iv, 12); | 447 | memcpy(iv, req->iv, GCM_AES_IV_SIZE); |
447 | 448 | ||
448 | return gcm_aes_nx_crypt(req, 0, req->assoclen); | 449 | return gcm_aes_nx_crypt(req, 0, req->assoclen); |
449 | } | 450 | } |
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = { | |||
498 | }, | 499 | }, |
499 | .init = nx_crypto_ctx_aes_gcm_init, | 500 | .init = nx_crypto_ctx_aes_gcm_init, |
500 | .exit = nx_crypto_ctx_aead_exit, | 501 | .exit = nx_crypto_ctx_aead_exit, |
501 | .ivsize = 12, | 502 | .ivsize = GCM_AES_IV_SIZE, |
502 | .maxauthsize = AES_BLOCK_SIZE, | 503 | .maxauthsize = AES_BLOCK_SIZE, |
503 | .setkey = gcm_aes_nx_set_key, | 504 | .setkey = gcm_aes_nx_set_key, |
504 | .encrypt = gcm_aes_nx_encrypt, | 505 | .encrypt = gcm_aes_nx_encrypt, |
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = { | |||
516 | }, | 517 | }, |
517 | .init = nx_crypto_ctx_aes_gcm_init, | 518 | .init = nx_crypto_ctx_aes_gcm_init, |
518 | .exit = nx_crypto_ctx_aead_exit, | 519 | .exit = nx_crypto_ctx_aead_exit, |
519 | .ivsize = 8, | 520 | .ivsize = GCM_RFC4106_IV_SIZE, |
520 | .maxauthsize = AES_BLOCK_SIZE, | 521 | .maxauthsize = AES_BLOCK_SIZE, |
521 | .setkey = gcm4106_aes_nx_set_key, | 522 | .setkey = gcm4106_aes_nx_set_key, |
522 | .setauthsize = gcm4106_aes_nx_setauthsize, | 523 | .setauthsize = gcm4106_aes_nx_setauthsize, |
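These nx hunks belong to a tree-wide cleanup in this pull: the magic IV lengths 12 and 8 are replaced with named constants from the new <crypto/gcm.h> header. For reference, the header added by this series boils down to:

/* include/crypto/gcm.h, as introduced in 4.15 */
#ifndef _CRYPTO_GCM_H
#define _CRYPTO_GCM_H

#define GCM_AES_IV_SIZE		12	/* plain GCM: 96-bit IV */
#define GCM_RFC4106_IV_SIZE	8	/* rfc4106: 64-bit explicit part */
#define GCM_RFC4543_IV_SIZE	8	/* rfc4543 (GMAC) */

#endif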
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 036057abb257..3a5e31be4764 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
@@ -833,7 +833,7 @@ static void __exit nx_fini(void) | |||
833 | vio_unregister_driver(&nx_driver.viodriver); | 833 | vio_unregister_driver(&nx_driver.viodriver); |
834 | } | 834 | } |
835 | 835 | ||
836 | static struct vio_device_id nx_crypto_driver_ids[] = { | 836 | static const struct vio_device_id nx_crypto_driver_ids[] = { |
837 | { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, | 837 | { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, |
838 | { "", "" } | 838 | { "", "" } |
839 | }; | 839 | }; |
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c index 7d4f8a4be6d8..0cc3b65d7162 100644 --- a/drivers/crypto/omap-aes-gcm.c +++ b/drivers/crypto/omap-aes-gcm.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/omap-dma.h> | 18 | #include <linux/omap-dma.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <crypto/aes.h> | 20 | #include <crypto/aes.h> |
21 | #include <crypto/gcm.h> | ||
21 | #include <crypto/scatterwalk.h> | 22 | #include <crypto/scatterwalk.h> |
22 | #include <crypto/skcipher.h> | 23 | #include <crypto/skcipher.h> |
23 | #include <crypto/internal/aead.h> | 24 | #include <crypto/internal/aead.h> |
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv) | |||
186 | sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); | 187 | sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); |
187 | if (!sk_req) { | 188 | if (!sk_req) { |
188 | pr_err("skcipher: Failed to allocate request\n"); | 189 | pr_err("skcipher: Failed to allocate request\n"); |
189 | return -1; | 190 | return -ENOMEM; |
190 | } | 191 | } |
191 | 192 | ||
192 | init_completion(&result.completion); | 193 | init_completion(&result.completion); |
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv) | |||
214 | } | 215 | } |
215 | /* fall through */ | 216 | /* fall through */ |
216 | default: | 217 | default: |
217 | pr_err("Encryption of IV failed for GCM mode"); | 218 | pr_err("Encryption of IV failed for GCM mode\n"); |
218 | break; | 219 | break; |
219 | } | 220 | } |
220 | 221 | ||
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode) | |||
311 | int err, assoclen; | 312 | int err, assoclen; |
312 | 313 | ||
313 | memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag)); | 314 | memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag)); |
314 | memcpy(rctx->iv + 12, &counter, 4); | 315 | memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4); |
315 | 316 | ||
316 | err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv); | 317 | err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv); |
317 | if (err) | 318 | if (err) |
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req) | |||
339 | { | 340 | { |
340 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | 341 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
341 | 342 | ||
342 | memcpy(rctx->iv, req->iv, 12); | 343 | memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); |
343 | return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM); | 344 | return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM); |
344 | } | 345 | } |
345 | 346 | ||
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req) | |||
347 | { | 348 | { |
348 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | 349 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
349 | 350 | ||
350 | memcpy(rctx->iv, req->iv, 12); | 351 | memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); |
351 | return omap_aes_gcm_crypt(req, FLAGS_GCM); | 352 | return omap_aes_gcm_crypt(req, FLAGS_GCM); |
352 | } | 353 | } |
353 | 354 | ||
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index c376a3ee7c2c..fbec0a2e76dd 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <crypto/scatterwalk.h> | 36 | #include <crypto/scatterwalk.h> |
37 | #include <crypto/aes.h> | 37 | #include <crypto/aes.h> |
38 | #include <crypto/gcm.h> | ||
38 | #include <crypto/engine.h> | 39 | #include <crypto/engine.h> |
39 | #include <crypto/internal/skcipher.h> | 40 | #include <crypto/internal/skcipher.h> |
40 | #include <crypto/internal/aead.h> | 41 | #include <crypto/internal/aead.h> |
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = { | |||
767 | }, | 768 | }, |
768 | .init = omap_aes_gcm_cra_init, | 769 | .init = omap_aes_gcm_cra_init, |
769 | .exit = omap_aes_gcm_cra_exit, | 770 | .exit = omap_aes_gcm_cra_exit, |
770 | .ivsize = 12, | 771 | .ivsize = GCM_AES_IV_SIZE, |
771 | .maxauthsize = AES_BLOCK_SIZE, | 772 | .maxauthsize = AES_BLOCK_SIZE, |
772 | .setkey = omap_aes_gcm_setkey, | 773 | .setkey = omap_aes_gcm_setkey, |
773 | .encrypt = omap_aes_gcm_encrypt, | 774 | .encrypt = omap_aes_gcm_encrypt, |
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = { | |||
788 | .init = omap_aes_gcm_cra_init, | 789 | .init = omap_aes_gcm_cra_init, |
789 | .exit = omap_aes_gcm_cra_exit, | 790 | .exit = omap_aes_gcm_cra_exit, |
790 | .maxauthsize = AES_BLOCK_SIZE, | 791 | .maxauthsize = AES_BLOCK_SIZE, |
791 | .ivsize = 8, | 792 | .ivsize = GCM_RFC4106_IV_SIZE, |
792 | .setkey = omap_aes_4106gcm_setkey, | 793 | .setkey = omap_aes_4106gcm_setkey, |
793 | .encrypt = omap_aes_4106gcm_encrypt, | 794 | .encrypt = omap_aes_4106gcm_encrypt, |
794 | .decrypt = omap_aes_4106gcm_decrypt, | 795 | .decrypt = omap_aes_4106gcm_decrypt, |
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd, | |||
974 | struct device *dev, struct resource *res) | 975 | struct device *dev, struct resource *res) |
975 | { | 976 | { |
976 | struct device_node *node = dev->of_node; | 977 | struct device_node *node = dev->of_node; |
977 | const struct of_device_id *match; | ||
978 | int err = 0; | 978 | int err = 0; |
979 | 979 | ||
980 | match = of_match_device(of_match_ptr(omap_aes_of_match), dev); | 980 | dd->pdata = of_device_get_match_data(dev); |
981 | if (!match) { | 981 | if (!dd->pdata) { |
982 | dev_err(dev, "no compatible OF match\n"); | 982 | dev_err(dev, "no compatible OF match\n"); |
983 | err = -EINVAL; | 983 | err = -EINVAL; |
984 | goto err; | 984 | goto err; |
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd, | |||
991 | goto err; | 991 | goto err; |
992 | } | 992 | } |
993 | 993 | ||
994 | dd->pdata = match->data; | ||
995 | |||
996 | err: | 994 | err: |
997 | return err; | 995 | return err; |
998 | } | 996 | } |
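This omap-aes hunk (and the omap-des and omap-sham ones below) converts the of_match_device() two-step into of_device_get_match_data(), which hands back the matched table entry's .data pointer directly, or NULL on no match. The general shape, with demo_dev invented for illustration:

#include <linux/device.h>
#include <linux/of_device.h>

struct demo_dev { const void *pdata; };	/* hypothetical */

static int demo_get_of_data(struct demo_dev *dd, struct device *dev)
{
	/* One call replaces of_match_device() + match->data. */
	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}
	return 0;
}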
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index d37c9506c36c..ebc5c0f11f03 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match); | |||
928 | static int omap_des_get_of(struct omap_des_dev *dd, | 928 | static int omap_des_get_of(struct omap_des_dev *dd, |
929 | struct platform_device *pdev) | 929 | struct platform_device *pdev) |
930 | { | 930 | { |
931 | const struct of_device_id *match; | ||
932 | 931 | ||
933 | match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev); | 932 | dd->pdata = of_device_get_match_data(&pdev->dev); |
934 | if (!match) { | 933 | if (!dd->pdata) { |
935 | dev_err(&pdev->dev, "no compatible OF match\n"); | 934 | dev_err(&pdev->dev, "no compatible OF match\n"); |
936 | return -EINVAL; | 935 | return -EINVAL; |
937 | } | 936 | } |
938 | 937 | ||
939 | dd->pdata = match->data; | ||
940 | |||
941 | return 0; | 938 | return 0; |
942 | } | 939 | } |
943 | #else | 940 | #else |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index c40ac30ec002..86b89ace836f 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd, | |||
1944 | struct device *dev, struct resource *res) | 1944 | struct device *dev, struct resource *res) |
1945 | { | 1945 | { |
1946 | struct device_node *node = dev->of_node; | 1946 | struct device_node *node = dev->of_node; |
1947 | const struct of_device_id *match; | ||
1948 | int err = 0; | 1947 | int err = 0; |
1949 | 1948 | ||
1950 | match = of_match_device(of_match_ptr(omap_sham_of_match), dev); | 1949 | dd->pdata = of_device_get_match_data(dev); |
1951 | if (!match) { | 1950 | if (!dd->pdata) { |
1952 | dev_err(dev, "no compatible OF match\n"); | 1951 | dev_err(dev, "no compatible OF match\n"); |
1953 | err = -EINVAL; | 1952 | err = -EINVAL; |
1954 | goto err; | 1953 | goto err; |
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd, | |||
1968 | goto err; | 1967 | goto err; |
1969 | } | 1968 | } |
1970 | 1969 | ||
1971 | dd->pdata = match->data; | ||
1972 | |||
1973 | err: | 1970 | err: |
1974 | return err; | 1971 | return err; |
1975 | } | 1972 | } |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b3869748cc6b..4b6642a25df5 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = { | |||
482 | } | 482 | } |
483 | }; | 483 | }; |
484 | 484 | ||
485 | static struct x86_cpu_id padlock_cpu_id[] = { | 485 | static const struct x86_cpu_id padlock_cpu_id[] = { |
486 | X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), | 486 | X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), |
487 | {} | 487 | {} |
488 | }; | 488 | }; |
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index bc72d20c32c3..d32c79328876 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = { | |||
509 | } | 509 | } |
510 | }; | 510 | }; |
511 | 511 | ||
512 | static struct x86_cpu_id padlock_sha_ids[] = { | 512 | static const struct x86_cpu_id padlock_sha_ids[] = { |
513 | X86_FEATURE_MATCH(X86_FEATURE_PHE), | 513 | X86_FEATURE_MATCH(X86_FEATURE_PHE), |
514 | {} | 514 | {} |
515 | }; | 515 | }; |
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 8afac52677a6..2d06409bd3c4 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c | |||
@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, | |||
228 | list_add_tail(&map->list, &vfs_table); | 228 | list_add_tail(&map->list, &vfs_table); |
229 | } else if (accel_dev->is_vf && pf) { | 229 | } else if (accel_dev->is_vf && pf) { |
230 | /* VF on host */ | 230 | /* VF on host */ |
231 | struct adf_accel_vf_info *vf_info; | ||
232 | struct vf_id_map *map; | 231 | struct vf_id_map *map; |
233 | 232 | ||
234 | vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev); | ||
235 | |||
236 | map = adf_find_vf(adf_get_vf_num(accel_dev)); | 233 | map = adf_find_vf(adf_get_vf_num(accel_dev)); |
237 | if (map) { | 234 | if (map) { |
238 | struct vf_id_map *next; | 235 | struct vf_id_map *next; |
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 6f5dd68449c6..13c52d6bf630 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c | |||
@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) | |||
443 | struct qat_crypto_instance *inst = ctx->inst; | 443 | struct qat_crypto_instance *inst = ctx->inst; |
444 | struct device *dev = &GET_DEV(inst->accel_dev); | 444 | struct device *dev = &GET_DEV(inst->accel_dev); |
445 | 445 | ||
446 | if (unlikely(!params->p || !params->g)) | ||
447 | return -EINVAL; | ||
448 | |||
449 | if (qat_dh_check_params_length(params->p_size << 3)) | 446 | if (qat_dh_check_params_length(params->p_size << 3)) |
450 | return -EINVAL; | 447 | return -EINVAL; |
451 | 448 | ||
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) | |||
462 | } | 459 | } |
463 | 460 | ||
464 | ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); | 461 | ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); |
465 | if (!ctx->g) { | 462 | if (!ctx->g) |
466 | dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); | ||
467 | ctx->p = NULL; | ||
468 | return -ENOMEM; | 463 | return -ENOMEM; |
469 | } | ||
470 | memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, | 464 | memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, |
471 | params->g_size); | 465 | params->g_size); |
472 | 466 | ||
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
507 | 501 | ||
508 | ret = qat_dh_set_params(ctx, ¶ms); | 502 | ret = qat_dh_set_params(ctx, ¶ms); |
509 | if (ret < 0) | 503 | if (ret < 0) |
510 | return ret; | 504 | goto err_clear_ctx; |
511 | 505 | ||
512 | ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, | 506 | ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, |
513 | GFP_KERNEL); | 507 | GFP_KERNEL); |
514 | if (!ctx->xa) { | 508 | if (!ctx->xa) { |
515 | qat_dh_clear_ctx(dev, ctx); | 509 | ret = -ENOMEM; |
516 | return -ENOMEM; | 510 | goto err_clear_ctx; |
517 | } | 511 | } |
518 | memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key, | 512 | memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key, |
519 | params.key_size); | 513 | params.key_size); |
520 | 514 | ||
521 | return 0; | 515 | return 0; |
516 | |||
517 | err_clear_ctx: | ||
518 | qat_dh_clear_ctx(dev, ctx); | ||
519 | return ret; | ||
522 | } | 520 | } |
523 | 521 | ||
524 | static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) | 522 | static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) |
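The qat_dh_set_secret() rework above is the canonical kernel error-handling shape: one cleanup label instead of freeing piecemeal on each failure path, which is also what fixed the "double free of ctx->p" noted in the merge log. In skeleton form, with all names hypothetical:

struct demo_ctx;				/* hypothetical */
int demo_alloc_params(struct demo_ctx *ctx);	/* may partially fill ctx */
int demo_alloc_key(struct demo_ctx *ctx);	/* hypothetical */
void demo_clear_ctx(struct demo_ctx *ctx);	/* frees + NULLs everything */

static int demo_set_secret(struct demo_ctx *ctx)
{
	int ret;

	ret = demo_alloc_params(ctx);
	if (ret < 0)
		goto err_clear_ctx;

	ret = demo_alloc_key(ctx);
	if (ret < 0)
		goto err_clear_ctx;

	return 0;

err_clear_ctx:
	/* Single cleanup path; it must tolerate (and NULL out) partially
	 * initialised state so nothing can be freed twice. */
	demo_clear_ctx(ctx);
	return ret;
}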
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index e2454d90d949..98d22c2096e3 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, | |||
567 | code_page->imp_expr_tab_offset); | 567 | code_page->imp_expr_tab_offset); |
568 | if (uc_var_tab->entry_num || imp_var_tab->entry_num || | 568 | if (uc_var_tab->entry_num || imp_var_tab->entry_num || |
569 | imp_expr_tab->entry_num) { | 569 | imp_expr_tab->entry_num) { |
570 | pr_err("QAT: UOF can't contain imported variable to be parsed"); | 570 | pr_err("QAT: UOF can't contain imported variable to be parsed\n"); |
571 | return -EINVAL; | 571 | return -EINVAL; |
572 | } | 572 | } |
573 | neigh_reg_tab = (struct icp_qat_uof_objtable *) | 573 | neigh_reg_tab = (struct icp_qat_uof_objtable *) |
574 | (encap_uof_obj->beg_uof + | 574 | (encap_uof_obj->beg_uof + |
575 | code_page->neigh_reg_tab_offset); | 575 | code_page->neigh_reg_tab_offset); |
576 | if (neigh_reg_tab->entry_num) { | 576 | if (neigh_reg_tab->entry_num) { |
577 | pr_err("QAT: UOF can't contain shared control store feature"); | 577 | pr_err("QAT: UOF can't contain shared control store feature\n"); |
578 | return -EINVAL; | 578 | return -EINVAL; |
579 | } | 579 | } |
580 | if (image->numpages > 1) { | 580 | if (image->numpages > 1) { |
581 | pr_err("QAT: UOF can't contain multiple pages"); | 581 | pr_err("QAT: UOF can't contain multiple pages\n"); |
582 | return -EINVAL; | 582 | return -EINVAL; |
583 | } | 583 | } |
584 | if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { | 584 | if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { |
585 | pr_err("QAT: UOF can't use shared control store feature"); | 585 | pr_err("QAT: UOF can't use shared control store feature\n"); |
586 | return -EFAULT; | 586 | return -EFAULT; |
587 | } | 587 | } |
588 | if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { | 588 | if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { |
589 | pr_err("QAT: UOF can't use reloadable feature"); | 589 | pr_err("QAT: UOF can't use reloadable feature\n"); |
590 | return -EFAULT; | 590 | return -EFAULT; |
591 | } | 591 | } |
592 | return 0; | 592 | return 0; |
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) | |||
702 | } | 702 | } |
703 | } | 703 | } |
704 | if (!mflag) { | 704 | if (!mflag) { |
705 | pr_err("QAT: uimage uses AE not set"); | 705 | pr_err("QAT: uimage uses AE not set\n"); |
706 | return -EINVAL; | 706 | return -EINVAL; |
707 | } | 707 | } |
708 | return 0; | 708 | return 0; |
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, | |||
791 | case ICP_GPA_ABS: | 791 | case ICP_GPA_ABS: |
792 | case ICP_GPB_ABS: | 792 | case ICP_GPB_ABS: |
793 | ctx_mask = 0; | 793 | ctx_mask = 0; |
794 | /* fall through */ | ||
794 | case ICP_GPA_REL: | 795 | case ICP_GPA_REL: |
795 | case ICP_GPB_REL: | 796 | case ICP_GPB_REL: |
796 | return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, | 797 | return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, |
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, | |||
800 | case ICP_SR_RD_ABS: | 801 | case ICP_SR_RD_ABS: |
801 | case ICP_DR_RD_ABS: | 802 | case ICP_DR_RD_ABS: |
802 | ctx_mask = 0; | 803 | ctx_mask = 0; |
804 | /* fall through */ | ||
803 | case ICP_SR_REL: | 805 | case ICP_SR_REL: |
804 | case ICP_DR_REL: | 806 | case ICP_DR_REL: |
805 | case ICP_SR_RD_REL: | 807 | case ICP_SR_RD_REL: |
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, | |||
809 | case ICP_SR_WR_ABS: | 811 | case ICP_SR_WR_ABS: |
810 | case ICP_DR_WR_ABS: | 812 | case ICP_DR_WR_ABS: |
811 | ctx_mask = 0; | 813 | ctx_mask = 0; |
814 | /* fall through */ | ||
812 | case ICP_SR_WR_REL: | 815 | case ICP_SR_WR_REL: |
813 | case ICP_DR_WR_REL: | 816 | case ICP_DR_WR_REL: |
814 | return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, | 817 | return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, |
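The /* fall through */ comments added above mark intentional switch fall-through for GCC 7's -Wimplicit-fallthrough; the ABS register cases are just the REL cases with an empty context mask. The idiom in isolation, with constants and helper invented:

enum { DEMO_ABS, DEMO_REL };			/* hypothetical */
static int demo_init_reg(unsigned long mask);	/* hypothetical */

static int demo_init_one(int reg_type, unsigned long ctx_mask)
{
	switch (reg_type) {
	case DEMO_ABS:
		/* ABS is just REL with an empty context mask. */
		ctx_mask = 0;
		/* fall through */
	case DEMO_REL:
		return demo_init_reg(ctx_mask);
	default:
		return -EINVAL;
	}
}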
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index b04b42f48366..ea4d96bf47e8 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c | |||
@@ -248,10 +248,7 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm) | |||
248 | ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, | 248 | ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, |
249 | CRYPTO_ALG_ASYNC | | 249 | CRYPTO_ALG_ASYNC | |
250 | CRYPTO_ALG_NEED_FALLBACK); | 250 | CRYPTO_ALG_NEED_FALLBACK); |
251 | if (IS_ERR(ctx->fallback)) | 251 | return PTR_ERR_OR_ZERO(ctx->fallback); |
252 | return PTR_ERR(ctx->fallback); | ||
253 | |||
254 | return 0; | ||
255 | } | 252 | } |
256 | 253 | ||
257 | static void qce_ablkcipher_exit(struct crypto_tfm *tfm) | 254 | static void qce_ablkcipher_exit(struct crypto_tfm *tfm) |
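PTR_ERR_OR_ZERO() used above comes from <linux/err.h> and is exactly the four removed lines folded into one expression; spelled out:

#include <linux/err.h>

/* PTR_ERR_OR_ZERO(ptr) behaves like this demo equivalent: */
static inline int demo_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}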
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 47e114ac09d0..53227d70d397 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c | |||
@@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req) | |||
349 | return qce->async_req_enqueue(tmpl->qce, &req->base); | 349 | return qce->async_req_enqueue(tmpl->qce, &req->base); |
350 | } | 350 | } |
351 | 351 | ||
352 | struct qce_ahash_result { | ||
353 | struct completion completion; | ||
354 | int error; | ||
355 | }; | ||
356 | |||
357 | static void qce_digest_complete(struct crypto_async_request *req, int error) | ||
358 | { | ||
359 | struct qce_ahash_result *result = req->data; | ||
360 | |||
361 | if (error == -EINPROGRESS) | ||
362 | return; | ||
363 | |||
364 | result->error = error; | ||
365 | complete(&result->completion); | ||
366 | } | ||
367 | |||
368 | static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | 352 | static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, |
369 | unsigned int keylen) | 353 | unsigned int keylen) |
370 | { | 354 | { |
371 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | 355 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
372 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); | 356 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); |
373 | struct qce_ahash_result result; | 357 | struct crypto_wait wait; |
374 | struct ahash_request *req; | 358 | struct ahash_request *req; |
375 | struct scatterlist sg; | 359 | struct scatterlist sg; |
376 | unsigned int blocksize; | 360 | unsigned int blocksize; |
@@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
405 | goto err_free_ahash; | 389 | goto err_free_ahash; |
406 | } | 390 | } |
407 | 391 | ||
408 | init_completion(&result.completion); | 392 | crypto_init_wait(&wait); |
409 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 393 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
410 | qce_digest_complete, &result); | 394 | crypto_req_done, &wait); |
411 | crypto_ahash_clear_flags(ahash_tfm, ~0); | 395 | crypto_ahash_clear_flags(ahash_tfm, ~0); |
412 | 396 | ||
413 | buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); | 397 | buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); |
@@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
420 | sg_init_one(&sg, buf, keylen); | 404 | sg_init_one(&sg, buf, keylen); |
421 | ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); | 405 | ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); |
422 | 406 | ||
423 | ret = crypto_ahash_digest(req); | 407 | ret = crypto_wait_req(crypto_ahash_digest(req), &wait); |
424 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
425 | ret = wait_for_completion_interruptible(&result.completion); | ||
426 | if (!ret) | ||
427 | ret = result.error; | ||
428 | } | ||
429 | |||
430 | if (ret) | 408 | if (ret) |
431 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 409 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
432 | 410 | ||
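This qce/sha.c conversion is the pattern behind the "disambiguate EBUSY by adding ENOSPC" item in the merge log: drivers drop their private completion structs and callbacks in favour of the new crypto_wait helpers, where crypto_wait_req() folds the -EINPROGRESS/-EBUSY waiting dance into one call. A generic usage sketch (demo_digest and its arguments are invented; the crypto_wait API itself is the one introduced by this series):

#include <crypto/hash.h>
#include <linux/crypto.h>

static int demo_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
		       unsigned int len, u8 *out)
{
	struct crypto_wait wait;
	struct ahash_request *req;
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	/* crypto_req_done is the stock callback that completes 'wait'. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* Waits out -EINPROGRESS/-EBUSY and returns the final status. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}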
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 7ac657f46d15..142c6020cec7 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -1,14 +1,16 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * Support for Samsung S5PV210 HW acceleration. | 4 | * Support for Samsung S5PV210 and Exynos HW acceleration. |
5 | * | 5 | * |
6 | * Copyright (C) 2011 NetUP Inc. All rights reserved. | 6 | * Copyright (C) 2011 NetUP Inc. All rights reserved. |
7 | * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as published | 10 | * it under the terms of the GNU General Public License version 2 as published |
10 | * by the Free Software Foundation. | 11 | * by the Free Software Foundation. |
11 | * | 12 | * |
13 | * Hash part based on omap-sham.c driver. | ||
12 | */ | 14 | */ |
13 | 15 | ||
14 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
@@ -30,98 +32,112 @@ | |||
30 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
31 | #include <crypto/scatterwalk.h> | 33 | #include <crypto/scatterwalk.h> |
32 | 34 | ||
33 | #define _SBF(s, v) ((v) << (s)) | 35 | #include <crypto/hash.h> |
36 | #include <crypto/md5.h> | ||
37 | #include <crypto/sha.h> | ||
38 | #include <crypto/internal/hash.h> | ||
39 | |||
40 | #define _SBF(s, v) ((v) << (s)) | ||
34 | 41 | ||
35 | /* Feed control registers */ | 42 | /* Feed control registers */ |
36 | #define SSS_REG_FCINTSTAT 0x0000 | 43 | #define SSS_REG_FCINTSTAT 0x0000 |
37 | #define SSS_FCINTSTAT_BRDMAINT BIT(3) | 44 | #define SSS_FCINTSTAT_HPARTINT BIT(7) |
38 | #define SSS_FCINTSTAT_BTDMAINT BIT(2) | 45 | #define SSS_FCINTSTAT_HDONEINT BIT(5) |
39 | #define SSS_FCINTSTAT_HRDMAINT BIT(1) | 46 | #define SSS_FCINTSTAT_BRDMAINT BIT(3) |
40 | #define SSS_FCINTSTAT_PKDMAINT BIT(0) | 47 | #define SSS_FCINTSTAT_BTDMAINT BIT(2) |
41 | 48 | #define SSS_FCINTSTAT_HRDMAINT BIT(1) | |
42 | #define SSS_REG_FCINTENSET 0x0004 | 49 | #define SSS_FCINTSTAT_PKDMAINT BIT(0) |
43 | #define SSS_FCINTENSET_BRDMAINTENSET BIT(3) | 50 | |
44 | #define SSS_FCINTENSET_BTDMAINTENSET BIT(2) | 51 | #define SSS_REG_FCINTENSET 0x0004 |
45 | #define SSS_FCINTENSET_HRDMAINTENSET BIT(1) | 52 | #define SSS_FCINTENSET_HPARTINTENSET BIT(7) |
46 | #define SSS_FCINTENSET_PKDMAINTENSET BIT(0) | 53 | #define SSS_FCINTENSET_HDONEINTENSET BIT(5) |
47 | 54 | #define SSS_FCINTENSET_BRDMAINTENSET BIT(3) | |
48 | #define SSS_REG_FCINTENCLR 0x0008 | 55 | #define SSS_FCINTENSET_BTDMAINTENSET BIT(2) |
49 | #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3) | 56 | #define SSS_FCINTENSET_HRDMAINTENSET BIT(1) |
50 | #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2) | 57 | #define SSS_FCINTENSET_PKDMAINTENSET BIT(0) |
51 | #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1) | 58 | |
52 | #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0) | 59 | #define SSS_REG_FCINTENCLR 0x0008 |
53 | 60 | #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7) | |
54 | #define SSS_REG_FCINTPEND 0x000C | 61 | #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5) |
55 | #define SSS_FCINTPEND_BRDMAINTP BIT(3) | 62 | #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3) |
56 | #define SSS_FCINTPEND_BTDMAINTP BIT(2) | 63 | #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2) |
57 | #define SSS_FCINTPEND_HRDMAINTP BIT(1) | 64 | #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1) |
58 | #define SSS_FCINTPEND_PKDMAINTP BIT(0) | 65 | #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0) |
59 | 66 | ||
60 | #define SSS_REG_FCFIFOSTAT 0x0010 | 67 | #define SSS_REG_FCINTPEND 0x000C |
61 | #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7) | 68 | #define SSS_FCINTPEND_HPARTINTP BIT(7) |
62 | #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6) | 69 | #define SSS_FCINTPEND_HDONEINTP BIT(5) |
63 | #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5) | 70 | #define SSS_FCINTPEND_BRDMAINTP BIT(3) |
64 | #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4) | 71 | #define SSS_FCINTPEND_BTDMAINTP BIT(2) |
65 | #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3) | 72 | #define SSS_FCINTPEND_HRDMAINTP BIT(1) |
66 | #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2) | 73 | #define SSS_FCINTPEND_PKDMAINTP BIT(0) |
67 | #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1) | 74 | |
68 | #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0) | 75 | #define SSS_REG_FCFIFOSTAT 0x0010 |
69 | 76 | #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7) | |
70 | #define SSS_REG_FCFIFOCTRL 0x0014 | 77 | #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6) |
71 | #define SSS_FCFIFOCTRL_DESSEL BIT(2) | 78 | #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5) |
72 | #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) | 79 | #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4) |
73 | #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) | 80 | #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3) |
74 | #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) | 81 | #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2) |
75 | 82 | #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1) | |
76 | #define SSS_REG_FCBRDMAS 0x0020 | 83 | #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0) |
77 | #define SSS_REG_FCBRDMAL 0x0024 | 84 | |
78 | #define SSS_REG_FCBRDMAC 0x0028 | 85 | #define SSS_REG_FCFIFOCTRL 0x0014 |
79 | #define SSS_FCBRDMAC_BYTESWAP BIT(1) | 86 | #define SSS_FCFIFOCTRL_DESSEL BIT(2) |
80 | #define SSS_FCBRDMAC_FLUSH BIT(0) | 87 | #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) |
81 | 88 | #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) | |
82 | #define SSS_REG_FCBTDMAS 0x0030 | 89 | #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) |
83 | #define SSS_REG_FCBTDMAL 0x0034 | 90 | #define SSS_HASHIN_MASK _SBF(0, 0x03) |
84 | #define SSS_REG_FCBTDMAC 0x0038 | 91 | |
85 | #define SSS_FCBTDMAC_BYTESWAP BIT(1) | 92 | #define SSS_REG_FCBRDMAS 0x0020 |
86 | #define SSS_FCBTDMAC_FLUSH BIT(0) | 93 | #define SSS_REG_FCBRDMAL 0x0024 |
87 | 94 | #define SSS_REG_FCBRDMAC 0x0028 | |
88 | #define SSS_REG_FCHRDMAS 0x0040 | 95 | #define SSS_FCBRDMAC_BYTESWAP BIT(1) |
89 | #define SSS_REG_FCHRDMAL 0x0044 | 96 | #define SSS_FCBRDMAC_FLUSH BIT(0) |
90 | #define SSS_REG_FCHRDMAC 0x0048 | 97 | |
91 | #define SSS_FCHRDMAC_BYTESWAP BIT(1) | 98 | #define SSS_REG_FCBTDMAS 0x0030 |
92 | #define SSS_FCHRDMAC_FLUSH BIT(0) | 99 | #define SSS_REG_FCBTDMAL 0x0034 |
93 | 100 | #define SSS_REG_FCBTDMAC 0x0038 | |
94 | #define SSS_REG_FCPKDMAS 0x0050 | 101 | #define SSS_FCBTDMAC_BYTESWAP BIT(1) |
95 | #define SSS_REG_FCPKDMAL 0x0054 | 102 | #define SSS_FCBTDMAC_FLUSH BIT(0) |
96 | #define SSS_REG_FCPKDMAC 0x0058 | 103 | |
97 | #define SSS_FCPKDMAC_BYTESWAP BIT(3) | 104 | #define SSS_REG_FCHRDMAS 0x0040 |
98 | #define SSS_FCPKDMAC_DESCEND BIT(2) | 105 | #define SSS_REG_FCHRDMAL 0x0044 |
99 | #define SSS_FCPKDMAC_TRANSMIT BIT(1) | 106 | #define SSS_REG_FCHRDMAC 0x0048 |
100 | #define SSS_FCPKDMAC_FLUSH BIT(0) | 107 | #define SSS_FCHRDMAC_BYTESWAP BIT(1) |
101 | 108 | #define SSS_FCHRDMAC_FLUSH BIT(0) | |
102 | #define SSS_REG_FCPKDMAO 0x005C | 109 | |
110 | #define SSS_REG_FCPKDMAS 0x0050 | ||
111 | #define SSS_REG_FCPKDMAL 0x0054 | ||
112 | #define SSS_REG_FCPKDMAC 0x0058 | ||
113 | #define SSS_FCPKDMAC_BYTESWAP BIT(3) | ||
114 | #define SSS_FCPKDMAC_DESCEND BIT(2) | ||
115 | #define SSS_FCPKDMAC_TRANSMIT BIT(1) | ||
116 | #define SSS_FCPKDMAC_FLUSH BIT(0) | ||
117 | |||
118 | #define SSS_REG_FCPKDMAO 0x005C | ||
103 | 119 | ||
104 | /* AES registers */ | 120 | /* AES registers */ |
105 | #define SSS_REG_AES_CONTROL 0x00 | 121 | #define SSS_REG_AES_CONTROL 0x00 |
106 | #define SSS_AES_BYTESWAP_DI BIT(11) | 122 | #define SSS_AES_BYTESWAP_DI BIT(11) |
107 | #define SSS_AES_BYTESWAP_DO BIT(10) | 123 | #define SSS_AES_BYTESWAP_DO BIT(10) |
108 | #define SSS_AES_BYTESWAP_IV BIT(9) | 124 | #define SSS_AES_BYTESWAP_IV BIT(9) |
109 | #define SSS_AES_BYTESWAP_CNT BIT(8) | 125 | #define SSS_AES_BYTESWAP_CNT BIT(8) |
110 | #define SSS_AES_BYTESWAP_KEY BIT(7) | 126 | #define SSS_AES_BYTESWAP_KEY BIT(7) |
111 | #define SSS_AES_KEY_CHANGE_MODE BIT(6) | 127 | #define SSS_AES_KEY_CHANGE_MODE BIT(6) |
112 | #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) | 128 | #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) |
113 | #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) | 129 | #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) |
114 | #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02) | 130 | #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02) |
115 | #define SSS_AES_FIFO_MODE BIT(3) | 131 | #define SSS_AES_FIFO_MODE BIT(3) |
116 | #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) | 132 | #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) |
117 | #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) | 133 | #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) |
118 | #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) | 134 | #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) |
119 | #define SSS_AES_MODE_DECRYPT BIT(0) | 135 | #define SSS_AES_MODE_DECRYPT BIT(0) |
120 | 136 | ||
121 | #define SSS_REG_AES_STATUS 0x04 | 137 | #define SSS_REG_AES_STATUS 0x04 |
122 | #define SSS_AES_BUSY BIT(2) | 138 | #define SSS_AES_BUSY BIT(2) |
123 | #define SSS_AES_INPUT_READY BIT(1) | 139 | #define SSS_AES_INPUT_READY BIT(1) |
124 | #define SSS_AES_OUTPUT_READY BIT(0) | 140 | #define SSS_AES_OUTPUT_READY BIT(0) |
125 | 141 | ||
126 | #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2)) | 142 | #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2)) |
127 | #define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2)) | 143 | #define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2)) |
@@ -129,26 +145,97 @@ | |||
129 | #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2)) | 145 | #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2)) |
130 | #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2)) | 146 | #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2)) |
131 | 147 | ||
132 | #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) | 148 | #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) |
133 | #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) | 149 | #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) |
134 | #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) | 150 | #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) |
135 | 151 | ||
136 | #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg) | 152 | #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg) |
137 | #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \ | 153 | #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \ |
138 | SSS_AES_REG(dev, reg)) | 154 | SSS_AES_REG(dev, reg)) |
139 | 155 | ||
140 | /* HW engine modes */ | 156 | /* HW engine modes */ |
141 | #define FLAGS_AES_DECRYPT BIT(0) | 157 | #define FLAGS_AES_DECRYPT BIT(0) |
142 | #define FLAGS_AES_MODE_MASK _SBF(1, 0x03) | 158 | #define FLAGS_AES_MODE_MASK _SBF(1, 0x03) |
143 | #define FLAGS_AES_CBC _SBF(1, 0x01) | 159 | #define FLAGS_AES_CBC _SBF(1, 0x01) |
144 | #define FLAGS_AES_CTR _SBF(1, 0x02) | 160 | #define FLAGS_AES_CTR _SBF(1, 0x02) |
161 | |||
162 | #define AES_KEY_LEN 16 | ||
163 | #define CRYPTO_QUEUE_LEN 1 | ||
164 | |||
165 | /* HASH registers */ | ||
166 | #define SSS_REG_HASH_CTRL 0x00 | ||
167 | |||
168 | #define SSS_HASH_USER_IV_EN BIT(5) | ||
169 | #define SSS_HASH_INIT_BIT BIT(4) | ||
170 | #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00) | ||
171 | #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01) | ||
172 | #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02) | ||
173 | |||
174 | #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03) | ||
175 | |||
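For orientation, assuming the driver's existing helper is #define _SBF(s, v) ((v) << (s)), the engine-select field above occupies bits [2:1] of SSS_REG_HASH_CTRL, so the values work out as:

        /* assuming: #define _SBF(s, v) ((v) << (s)) */
        SSS_HASH_ENGINE_SHA256 == _SBF(1, 0x02) == 0x02 << 1 == 0x4
        SSS_HASH_ENGINE_MASK   == _SBF(1, 0x03) == 0x6  /* covers both bits */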
176 | #define SSS_REG_HASH_CTRL_PAUSE 0x04 | ||
177 | |||
178 | #define SSS_HASH_PAUSE BIT(0) | ||
179 | |||
180 | #define SSS_REG_HASH_CTRL_FIFO 0x08 | ||
181 | |||
182 | #define SSS_HASH_FIFO_MODE_DMA BIT(0) | ||
183 | #define SSS_HASH_FIFO_MODE_CPU 0 | ||
184 | |||
185 | #define SSS_REG_HASH_CTRL_SWAP 0x0C | ||
186 | |||
187 | #define SSS_HASH_BYTESWAP_DI BIT(3) | ||
188 | #define SSS_HASH_BYTESWAP_DO BIT(2) | ||
189 | #define SSS_HASH_BYTESWAP_IV BIT(1) | ||
190 | #define SSS_HASH_BYTESWAP_KEY BIT(0) | ||
191 | |||
192 | #define SSS_REG_HASH_STATUS 0x10 | ||
193 | |||
194 | #define SSS_HASH_STATUS_MSG_DONE BIT(6) | ||
195 | #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4) | ||
196 | #define SSS_HASH_STATUS_BUFFER_READY BIT(0) | ||
197 | |||
198 | #define SSS_REG_HASH_MSG_SIZE_LOW 0x20 | ||
199 | #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24 | ||
200 | |||
201 | #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28 | ||
202 | #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C | ||
145 | 203 | ||
146 | #define AES_KEY_LEN 16 | 204 | #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2)) |
147 | #define CRYPTO_QUEUE_LEN 1 | 205 | #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2)) |
206 | |||
207 | #define HASH_BLOCK_SIZE 64 | ||
208 | #define HASH_REG_SIZEOF 4 | ||
209 | #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF) | ||
210 | #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF) | ||
211 | #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF) | ||
212 | |||
213 | /* | ||
214 | * HASH bit numbers, used by the device, set in dev->hash_flags with | ||
215 | * set_bit() and clear_bit(), or tested with test_bit(), to keep the | ||
216 | * HASH state BUSY or FREE, or to signal state from the irq_handler to | ||
217 | * the hash_tasklet. The SGS bits track memory allocated for the scatterlist | ||
218 | */ | ||
219 | #define HASH_FLAGS_BUSY 0 | ||
220 | #define HASH_FLAGS_FINAL 1 | ||
221 | #define HASH_FLAGS_DMA_ACTIVE 2 | ||
222 | #define HASH_FLAGS_OUTPUT_READY 3 | ||
223 | #define HASH_FLAGS_DMA_READY 4 | ||
224 | #define HASH_FLAGS_SGS_COPIED 5 | ||
225 | #define HASH_FLAGS_SGS_ALLOCED 6 | ||
226 | |||
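These are bit numbers, not masks: set_bit()/clear_bit()/test_bit() take the index directly, while bulk operations on dev->hash_flags build masks with BIT(). A minimal sketch of the pattern used throughout the hash path below (do_final_work() is hypothetical):

        set_bit(HASH_FLAGS_BUSY, &dev->hash_flags);        /* atomic, by index */
        if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))  /* atomic test */
                do_final_work();
        /* bulk clear under hash_lock uses BIT() masks, as in
         * s5p_hash_finish_req() */
        dev->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL));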
227 | /* HASH HW constants */ | ||
228 | #define BUFLEN HASH_BLOCK_SIZE | ||
229 | |||
230 | #define SSS_HASH_DMA_LEN_ALIGN 8 | ||
231 | #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1) | ||
232 | |||
233 | #define SSS_HASH_QUEUE_LENGTH 10 | ||
148 | 234 | ||
149 | /** | 235 | /** |
150 | * struct samsung_aes_variant - platform specific SSS driver data | 236 | * struct samsung_aes_variant - platform specific SSS driver data |
151 | * @aes_offset: AES register offset from SSS module's base. | 237 | * @aes_offset: AES register offset from SSS module's base. |
238 | * @hash_offset: HASH register offset from SSS module's base. | ||
152 | * | 239 | * |
153 | * Specifies platform specific configuration of SSS module. | 240 | * Specifies platform specific configuration of SSS module. |
154 | * Note: A structure for driver specific platform data is used for future | 241 | * Note: A structure for driver specific platform data is used for future |
@@ -156,6 +243,7 @@ | |||
156 | */ | 243 | */ |
157 | struct samsung_aes_variant { | 244 | struct samsung_aes_variant { |
158 | unsigned int aes_offset; | 245 | unsigned int aes_offset; |
246 | unsigned int hash_offset; | ||
159 | }; | 247 | }; |
160 | 248 | ||
161 | struct s5p_aes_reqctx { | 249 | struct s5p_aes_reqctx { |
@@ -195,6 +283,19 @@ struct s5p_aes_ctx { | |||
195 | * protects against concurrent access to these fields. | 283 | * protects against concurrent access to these fields. |
196 | * @lock: Lock for protecting both access to device hardware registers | 284 | * @lock: Lock for protecting both access to device hardware registers |
197 | * and fields related to current request (including the busy field). | 285 | * and fields related to current request (including the busy field). |
286 | * @res: Resources for hash. | ||
287 | * @io_hash_base: Per-variant offset for HASH block IO memory. | ||
288 | * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags | ||
289 | * variables. | ||
290 | * @hash_flags: Flags for current HASH op. | ||
291 | * @hash_queue: Async hash queue. | ||
292 | * @hash_tasklet: New HASH request scheduling job. | ||
293 | * @xmit_buf: Buffer for current HASH request transfer into SSS block. | ||
294 | * @hash_req: Current request sending to SSS HASH block. | ||
295 | * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block. | ||
296 | * @hash_sg_cnt: Counter for hash_sg_iter. | ||
297 | * | ||
298 | * @use_hash: true if HASH algs enabled | ||
198 | */ | 299 | */ |
199 | struct s5p_aes_dev { | 300 | struct s5p_aes_dev { |
200 | struct device *dev; | 301 | struct device *dev; |
@@ -215,16 +316,83 @@ struct s5p_aes_dev { | |||
215 | struct crypto_queue queue; | 316 | struct crypto_queue queue; |
216 | bool busy; | 317 | bool busy; |
217 | spinlock_t lock; | 318 | spinlock_t lock; |
319 | |||
320 | struct resource *res; | ||
321 | void __iomem *io_hash_base; | ||
322 | |||
323 | spinlock_t hash_lock; /* protect hash_ vars */ | ||
324 | unsigned long hash_flags; | ||
325 | struct crypto_queue hash_queue; | ||
326 | struct tasklet_struct hash_tasklet; | ||
327 | |||
328 | u8 xmit_buf[BUFLEN]; | ||
329 | struct ahash_request *hash_req; | ||
330 | struct scatterlist *hash_sg_iter; | ||
331 | unsigned int hash_sg_cnt; | ||
332 | |||
333 | bool use_hash; | ||
218 | }; | 334 | }; |
219 | 335 | ||
220 | static struct s5p_aes_dev *s5p_dev; | 336 | /** |
337 | * struct s5p_hash_reqctx - HASH request context | ||
338 | * @dd: Associated device | ||
339 | * @op_update: Current request operation (OP_UPDATE or OP_FINAL) | ||
340 | * @digcnt: Number of bytes processed by HW (without buffer[] ones) | ||
341 | * @digest: Digest message or IV for partial result | ||
342 | * @nregs: Number of HW registers for digest or IV read/write | ||
343 | * @engine: Bits for selecting type of HASH in SSS block | ||
344 | * @sg: sg for DMA transfer | ||
345 | * @sg_len: Length of sg for DMA transfer | ||
346 | * @sgl[]: sg for joining buffer and req->src scatterlist | ||
347 | * @skip: Skip offset in req->src for current op | ||
348 | * @total: Total number of bytes for current request | ||
349 | * @finup: Keep state for finup or final. | ||
350 | * @error: Keep track of error. | ||
351 | * @bufcnt: Number of bytes held in buffer[] | ||
352 | * @buffer[]: For byte(s) from end of req->src in UPDATE op | ||
353 | */ | ||
354 | struct s5p_hash_reqctx { | ||
355 | struct s5p_aes_dev *dd; | ||
356 | bool op_update; | ||
357 | |||
358 | u64 digcnt; | ||
359 | u8 digest[SHA256_DIGEST_SIZE]; | ||
360 | |||
361 | unsigned int nregs; /* digest_size / sizeof(reg) */ | ||
362 | u32 engine; | ||
363 | |||
364 | struct scatterlist *sg; | ||
365 | unsigned int sg_len; | ||
366 | struct scatterlist sgl[2]; | ||
367 | unsigned int skip; | ||
368 | unsigned int total; | ||
369 | bool finup; | ||
370 | bool error; | ||
371 | |||
372 | u32 bufcnt; | ||
373 | u8 buffer[0]; | ||
374 | }; | ||
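The trailing buffer[0] member is a zero-length array: the request context must be allocated with extra room for it, which is why s5p_hash_cra_init_alg() below sets the request size to sizeof(struct s5p_hash_reqctx) + BUFLEN, and why export/import move sizeof(*ctx) plus the staged bytes.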
375 | |||
376 | /** | ||
377 | * struct s5p_hash_ctx - HASH transformation context | ||
378 | * @dd: Associated device | ||
379 | * @flags: Bits for algorithm HASH. | ||
380 | * @fallback: Software transformation for zero message or size < BUFLEN. | ||
381 | */ | ||
382 | struct s5p_hash_ctx { | ||
383 | struct s5p_aes_dev *dd; | ||
384 | unsigned long flags; | ||
385 | struct crypto_shash *fallback; | ||
386 | }; | ||
221 | 387 | ||
222 | static const struct samsung_aes_variant s5p_aes_data = { | 388 | static const struct samsung_aes_variant s5p_aes_data = { |
223 | .aes_offset = 0x4000, | 389 | .aes_offset = 0x4000, |
390 | .hash_offset = 0x6000, | ||
224 | }; | 391 | }; |
225 | 392 | ||
226 | static const struct samsung_aes_variant exynos_aes_data = { | 393 | static const struct samsung_aes_variant exynos_aes_data = { |
227 | .aes_offset = 0x200, | 394 | .aes_offset = 0x200, |
395 | .hash_offset = 0x400, | ||
228 | }; | 396 | }; |
229 | 397 | ||
230 | static const struct of_device_id s5p_sss_dt_match[] = { | 398 | static const struct of_device_id s5p_sss_dt_match[] = { |
@@ -254,6 +422,8 @@ static inline struct samsung_aes_variant *find_s5p_sss_version | |||
254 | platform_get_device_id(pdev)->driver_data; | 422 | platform_get_device_id(pdev)->driver_data; |
255 | } | 423 | } |
256 | 424 | ||
425 | static struct s5p_aes_dev *s5p_dev; | ||
426 | |||
257 | static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) | 427 | static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) |
258 | { | 428 | { |
259 | SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); | 429 | SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); |
@@ -436,15 +606,65 @@ static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/) | |||
436 | return ret; | 606 | return ret; |
437 | } | 607 | } |
438 | 608 | ||
609 | static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset) | ||
610 | { | ||
611 | return __raw_readl(dd->io_hash_base + offset); | ||
612 | } | ||
613 | |||
614 | static inline void s5p_hash_write(struct s5p_aes_dev *dd, | ||
615 | u32 offset, u32 value) | ||
616 | { | ||
617 | __raw_writel(value, dd->io_hash_base + offset); | ||
618 | } | ||
619 | |||
620 | /** | ||
621 | * s5p_set_dma_hashdata() - start DMA with sg | ||
622 | * @dev: device | ||
623 | * @sg: scatterlist ready to DMA transmit | ||
624 | */ | ||
625 | static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev, | ||
626 | struct scatterlist *sg) | ||
627 | { | ||
628 | dev->hash_sg_cnt--; | ||
629 | SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg)); | ||
630 | SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */ | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * s5p_hash_rx() - get next hash_sg_iter | ||
635 | * @dev: device | ||
636 | * | ||
637 | * Return: | ||
638 | * 2 if there is no more data and it is UPDATE op | ||
639 | * 1 if new receiving (input) data is ready and can be written to device | ||
640 | * 0 if there is no more data and it is FINAL op | ||
641 | */ | ||
642 | static int s5p_hash_rx(struct s5p_aes_dev *dev) | ||
643 | { | ||
644 | if (dev->hash_sg_cnt > 0) { | ||
645 | dev->hash_sg_iter = sg_next(dev->hash_sg_iter); | ||
646 | return 1; | ||
647 | } | ||
648 | |||
649 | set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags); | ||
650 | if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags)) | ||
651 | return 0; | ||
652 | |||
653 | return 2; | ||
654 | } | ||
655 | |||
439 | static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | 656 | static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) |
440 | { | 657 | { |
441 | struct platform_device *pdev = dev_id; | 658 | struct platform_device *pdev = dev_id; |
442 | struct s5p_aes_dev *dev = platform_get_drvdata(pdev); | 659 | struct s5p_aes_dev *dev = platform_get_drvdata(pdev); |
443 | int err_dma_tx = 0; | 660 | int err_dma_tx = 0; |
444 | int err_dma_rx = 0; | 661 | int err_dma_rx = 0; |
662 | int err_dma_hx = 0; | ||
445 | bool tx_end = false; | 663 | bool tx_end = false; |
664 | bool hx_end = false; | ||
446 | unsigned long flags; | 665 | unsigned long flags; |
447 | uint32_t status; | 666 | uint32_t status; |
667 | u32 st_bits; | ||
448 | int err; | 668 | int err; |
449 | 669 | ||
450 | spin_lock_irqsave(&dev->lock, flags); | 670 | spin_lock_irqsave(&dev->lock, flags); |
@@ -456,6 +676,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | |||
456 | * | 676 | * |
457 | * If there is no more data in tx scatter list, call s5p_aes_complete() | 677 | * If there is no more data in tx scatter list, call s5p_aes_complete() |
458 | * and schedule new tasklet. | 678 | * and schedule new tasklet. |
679 | * | ||
680 | * Handle the hx interrupt. If there is still data, map the next entry. | ||
459 | */ | 681 | */ |
460 | status = SSS_READ(dev, FCINTSTAT); | 682 | status = SSS_READ(dev, FCINTSTAT); |
461 | if (status & SSS_FCINTSTAT_BRDMAINT) | 683 | if (status & SSS_FCINTSTAT_BRDMAINT) |
@@ -467,7 +689,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | |||
467 | err_dma_tx = s5p_aes_tx(dev); | 689 | err_dma_tx = s5p_aes_tx(dev); |
468 | } | 690 | } |
469 | 691 | ||
470 | SSS_WRITE(dev, FCINTPEND, status); | 692 | if (status & SSS_FCINTSTAT_HRDMAINT) |
693 | err_dma_hx = s5p_hash_rx(dev); | ||
694 | |||
695 | st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT | | ||
696 | SSS_FCINTSTAT_HRDMAINT); | ||
697 | /* clear DMA bits */ | ||
698 | SSS_WRITE(dev, FCINTPEND, st_bits); | ||
699 | |||
700 | /* clear HASH irq bits */ | ||
701 | if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) { | ||
702 | /* cannot have both HPART and HDONE */ | ||
703 | if (status & SSS_FCINTSTAT_HPARTINT) | ||
704 | st_bits = SSS_HASH_STATUS_PARTIAL_DONE; | ||
705 | |||
706 | if (status & SSS_FCINTSTAT_HDONEINT) | ||
707 | st_bits = SSS_HASH_STATUS_MSG_DONE; | ||
708 | |||
709 | set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags); | ||
710 | s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits); | ||
711 | hx_end = true; | ||
712 | /* when DONE or PART, do not handle HASH DMA */ | ||
713 | err_dma_hx = 0; | ||
714 | } | ||
471 | 715 | ||
472 | if (err_dma_rx < 0) { | 716 | if (err_dma_rx < 0) { |
473 | err = err_dma_rx; | 717 | err = err_dma_rx; |
@@ -480,6 +724,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | |||
480 | 724 | ||
481 | if (tx_end) { | 725 | if (tx_end) { |
482 | s5p_sg_done(dev); | 726 | s5p_sg_done(dev); |
727 | if (err_dma_hx == 1) | ||
728 | s5p_set_dma_hashdata(dev, dev->hash_sg_iter); | ||
483 | 729 | ||
484 | spin_unlock_irqrestore(&dev->lock, flags); | 730 | spin_unlock_irqrestore(&dev->lock, flags); |
485 | 731 | ||
@@ -497,21 +743,1100 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | |||
497 | s5p_set_dma_outdata(dev, dev->sg_dst); | 743 | s5p_set_dma_outdata(dev, dev->sg_dst); |
498 | if (err_dma_rx == 1) | 744 | if (err_dma_rx == 1) |
499 | s5p_set_dma_indata(dev, dev->sg_src); | 745 | s5p_set_dma_indata(dev, dev->sg_src); |
746 | if (err_dma_hx == 1) | ||
747 | s5p_set_dma_hashdata(dev, dev->hash_sg_iter); | ||
500 | 748 | ||
501 | spin_unlock_irqrestore(&dev->lock, flags); | 749 | spin_unlock_irqrestore(&dev->lock, flags); |
502 | } | 750 | } |
503 | 751 | ||
504 | return IRQ_HANDLED; | 752 | goto hash_irq_end; |
505 | 753 | ||
506 | error: | 754 | error: |
507 | s5p_sg_done(dev); | 755 | s5p_sg_done(dev); |
508 | dev->busy = false; | 756 | dev->busy = false; |
757 | if (err_dma_hx == 1) | ||
758 | s5p_set_dma_hashdata(dev, dev->hash_sg_iter); | ||
759 | |||
509 | spin_unlock_irqrestore(&dev->lock, flags); | 760 | spin_unlock_irqrestore(&dev->lock, flags); |
510 | s5p_aes_complete(dev, err); | 761 | s5p_aes_complete(dev, err); |
511 | 762 | ||
763 | hash_irq_end: | ||
764 | /* | ||
765 | * Note about else if: | ||
766 | * when hash_sg_iter reaches the end and it is an UPDATE op, | ||
767 | * issue SSS_HASH_PAUSE and wait for HPART irq | ||
768 | */ | ||
769 | if (hx_end) | ||
770 | tasklet_schedule(&dev->hash_tasklet); | ||
771 | else if (err_dma_hx == 2) | ||
772 | s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE, | ||
773 | SSS_HASH_PAUSE); | ||
774 | |||
512 | return IRQ_HANDLED; | 775 | return IRQ_HANDLED; |
513 | } | 776 | } |
514 | 777 | ||
778 | /** | ||
779 | * s5p_hash_read_msg() - read message or IV from HW | ||
780 | * @req: AHASH request | ||
781 | */ | ||
782 | static void s5p_hash_read_msg(struct ahash_request *req) | ||
783 | { | ||
784 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
785 | struct s5p_aes_dev *dd = ctx->dd; | ||
786 | u32 *hash = (u32 *)ctx->digest; | ||
787 | unsigned int i; | ||
788 | |||
789 | for (i = 0; i < ctx->nregs; i++) | ||
790 | hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i)); | ||
791 | } | ||
792 | |||
793 | /** | ||
794 | * s5p_hash_write_ctx_iv() - write IV for next partial/finup op. | ||
795 | * @dd: device | ||
796 | * @ctx: request context | ||
797 | */ | ||
798 | static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd, | ||
799 | struct s5p_hash_reqctx *ctx) | ||
800 | { | ||
801 | u32 *hash = (u32 *)ctx->digest; | ||
802 | unsigned int i; | ||
803 | |||
804 | for (i = 0; i < ctx->nregs; i++) | ||
805 | s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]); | ||
806 | } | ||
807 | |||
808 | /** | ||
809 | * s5p_hash_write_iv() - write IV for next partial/finup op. | ||
810 | * @req: AHASH request | ||
811 | */ | ||
812 | static void s5p_hash_write_iv(struct ahash_request *req) | ||
813 | { | ||
814 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
815 | |||
816 | s5p_hash_write_ctx_iv(ctx->dd, ctx); | ||
817 | } | ||
818 | |||
819 | /** | ||
820 | * s5p_hash_copy_result() - copy digest into req->result | ||
821 | * @req: AHASH request | ||
822 | */ | ||
823 | static void s5p_hash_copy_result(struct ahash_request *req) | ||
824 | { | ||
825 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
826 | |||
827 | if (!req->result) | ||
828 | return; | ||
829 | |||
830 | memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF); | ||
831 | } | ||
832 | |||
833 | /** | ||
834 | * s5p_hash_dma_flush() - flush HASH DMA | ||
835 | * @dev: secss device | ||
836 | */ | ||
837 | static void s5p_hash_dma_flush(struct s5p_aes_dev *dev) | ||
838 | { | ||
839 | SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH); | ||
840 | } | ||
841 | |||
842 | /** | ||
843 | * s5p_hash_dma_enable() - enable DMA mode for HASH | ||
844 | * @dev: secss device | ||
845 | * | ||
846 | * enable DMA mode for HASH | ||
847 | */ | ||
848 | static void s5p_hash_dma_enable(struct s5p_aes_dev *dev) | ||
849 | { | ||
850 | s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * s5p_hash_irq_disable() - disable irq HASH signals | ||
855 | * @dev: secss device | ||
856 | * @flags: bitfield with irq's to be disabled | ||
857 | */ | ||
858 | static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags) | ||
859 | { | ||
860 | SSS_WRITE(dev, FCINTENCLR, flags); | ||
861 | } | ||
862 | |||
863 | /** | ||
864 | * s5p_hash_irq_enable() - enable irq signals | ||
865 | * @dev: secss device | ||
866 | * @flags: bitfield with irq's to be enabled | ||
867 | */ | ||
868 | static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags) | ||
869 | { | ||
870 | SSS_WRITE(dev, FCINTENSET, flags); | ||
871 | } | ||
872 | |||
873 | /** | ||
874 | * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH | ||
875 | * @dev: secss device | ||
876 | * @hashflow: HASH stream flow with/without crypto AES/DES | ||
877 | */ | ||
878 | static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow) | ||
879 | { | ||
880 | unsigned long flags; | ||
881 | u32 flow; | ||
882 | |||
883 | spin_lock_irqsave(&dev->lock, flags); | ||
884 | |||
885 | flow = SSS_READ(dev, FCFIFOCTRL); | ||
886 | flow &= ~SSS_HASHIN_MASK; | ||
887 | flow |= hashflow; | ||
888 | SSS_WRITE(dev, FCFIFOCTRL, flow); | ||
889 | |||
890 | spin_unlock_irqrestore(&dev->lock, flags); | ||
891 | } | ||
892 | |||
893 | /** | ||
894 | * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS | ||
895 | * @dev: secss device | ||
896 | * @hashflow: HASH stream flow with/without AES/DES | ||
897 | * | ||
898 | * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW, | ||
899 | * enable HASH irq's HRDMA, HDONE, HPART | ||
900 | */ | ||
901 | static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow) | ||
902 | { | ||
903 | s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR | | ||
904 | SSS_FCINTENCLR_HDONEINTENCLR | | ||
905 | SSS_FCINTENCLR_HPARTINTENCLR); | ||
906 | s5p_hash_dma_flush(dev); | ||
907 | |||
908 | s5p_hash_dma_enable(dev); | ||
909 | s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK); | ||
910 | s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET | | ||
911 | SSS_FCINTENSET_HDONEINTENSET | | ||
912 | SSS_FCINTENSET_HPARTINTENSET); | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing | ||
917 | * @dd: secss device | ||
918 | * @length: length for request | ||
919 | * @final: true if final op | ||
920 | * | ||
921 | * Prepare SSS HASH block for processing bytes in DMA mode. If it is called | ||
922 | * after previous updates, fill up IV words. For final, calculate and set | ||
923 | * lengths for HASH so SecSS can finalize the hash. For partial, set the | ||
924 | * SSS HASH length to 2^63 so it will never be reached, and set prelow and | ||
925 | * prehigh to zero. | ||
926 | * | ||
927 | * This function does not start DMA transfer. | ||
928 | */ | ||
929 | static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length, | ||
930 | bool final) | ||
931 | { | ||
932 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); | ||
933 | u32 prelow, prehigh, low, high; | ||
934 | u32 configflags, swapflags; | ||
935 | u64 tmplen; | ||
936 | |||
937 | configflags = ctx->engine | SSS_HASH_INIT_BIT; | ||
938 | |||
939 | if (likely(ctx->digcnt)) { | ||
940 | s5p_hash_write_ctx_iv(dd, ctx); | ||
941 | configflags |= SSS_HASH_USER_IV_EN; | ||
942 | } | ||
943 | |||
944 | if (final) { | ||
945 | /* number of bytes for last part */ | ||
946 | low = length; | ||
947 | high = 0; | ||
948 | /* total number of bits prev hashed */ | ||
949 | tmplen = ctx->digcnt * 8; | ||
950 | prelow = (u32)tmplen; | ||
951 | prehigh = (u32)(tmplen >> 32); | ||
952 | } else { | ||
953 | prelow = 0; | ||
954 | prehigh = 0; | ||
955 | low = 0; | ||
956 | high = BIT(31); | ||
957 | } | ||
958 | |||
959 | swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO | | ||
960 | SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY; | ||
961 | |||
962 | s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low); | ||
963 | s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high); | ||
964 | s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow); | ||
965 | s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh); | ||
966 | |||
967 | s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags); | ||
968 | s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags); | ||
969 | } | ||
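A quick worked instance of the length programming above: suppose two 64-byte updates have already been hashed (ctx->digcnt == 128) and a 20-byte final part follows. Then:

        u64 tmplen  = 128ULL * 8;          /* 1024 bits previously hashed */
        u32 prelow  = (u32)tmplen;         /* 1024 */
        u32 prehigh = (u32)(tmplen >> 32); /* 0 */
        u32 low     = 20;                  /* bytes in the final part */
        u32 high    = 0;

For a non-final (partial) pass the registers are instead programmed with low = 0, high = BIT(31) (the never-reached 2^63-bit length) and prelow = prehigh = 0, exactly as the else branch does.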
970 | |||
971 | /** | ||
972 | * s5p_hash_xmit_dma() - start DMA hash processing | ||
973 | * @dd: secss device | ||
974 | * @length: length for request | ||
975 | * @final: true if final op | ||
976 | * | ||
977 | * Update digcnt here, as it is needed for finup/final op. | ||
978 | */ | ||
979 | static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length, | ||
980 | bool final) | ||
981 | { | ||
982 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); | ||
983 | unsigned int cnt; | ||
984 | |||
985 | cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); | ||
986 | if (!cnt) { | ||
987 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
988 | ctx->error = true; | ||
989 | return -EINVAL; | ||
990 | } | ||
991 | |||
992 | set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); | ||
993 | dd->hash_sg_iter = ctx->sg; | ||
994 | dd->hash_sg_cnt = cnt; | ||
995 | s5p_hash_write_ctrl(dd, length, final); | ||
996 | ctx->digcnt += length; | ||
997 | ctx->total -= length; | ||
998 | |||
999 | /* catch last interrupt */ | ||
1000 | if (final) | ||
1001 | set_bit(HASH_FLAGS_FINAL, &dd->hash_flags); | ||
1002 | |||
1003 | s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */ | ||
1004 | |||
1005 | return -EINPROGRESS; | ||
1006 | } | ||
1007 | |||
1008 | /** | ||
1009 | * s5p_hash_copy_sgs() - copy request's bytes into new buffer | ||
1010 | * @ctx: request context | ||
1011 | * @sg: source scatterlist request | ||
1012 | * @new_len: number of bytes to process from sg | ||
1013 | * | ||
1014 | * Allocate a new buffer and copy data for HASH into it. If xmit_buf was | ||
1015 | * filled, copy it first, then copy data from sg into it. Prepare one sgl[0] | ||
1016 | * with allocated buffer. | ||
1017 | * | ||
1018 | * Set a bit in dd->hash_flags so we can free it after the irq ends processing. | ||
1019 | */ | ||
1020 | static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx, | ||
1021 | struct scatterlist *sg, unsigned int new_len) | ||
1022 | { | ||
1023 | unsigned int pages, len; | ||
1024 | void *buf; | ||
1025 | |||
1026 | len = new_len + ctx->bufcnt; | ||
1027 | pages = get_order(len); | ||
1028 | |||
1029 | buf = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
1030 | if (!buf) { | ||
1031 | dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n"); | ||
1032 | ctx->error = true; | ||
1033 | return -ENOMEM; | ||
1034 | } | ||
1035 | |||
1036 | if (ctx->bufcnt) | ||
1037 | memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); | ||
1038 | |||
1039 | scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip, | ||
1040 | new_len, 0); | ||
1041 | sg_init_table(ctx->sgl, 1); | ||
1042 | sg_set_buf(ctx->sgl, buf, len); | ||
1043 | ctx->sg = ctx->sgl; | ||
1044 | ctx->sg_len = 1; | ||
1045 | ctx->bufcnt = 0; | ||
1046 | ctx->skip = 0; | ||
1047 | set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags); | ||
1048 | |||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | /** | ||
1053 | * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy | ||
1054 | * @ctx: request context | ||
1055 | * @sg: source scatterlist request | ||
1056 | * @new_len: number of bytes to process from sg | ||
1057 | * | ||
1058 | * Allocate a new scatterlist table and copy data for HASH into it. If | ||
1059 | * xmit_buf was filled, prepare it first, then copy page, length and offset from | ||
1060 | * source sg into it, adjusting begin and/or end for skip offset and | ||
1061 | * hash_later value. | ||
1062 | * | ||
1063 | * Resulting sg table will be assigned to ctx->sg. Set flag so we can free | ||
1064 | * it after irq ends processing. | ||
1065 | */ | ||
1066 | static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx, | ||
1067 | struct scatterlist *sg, unsigned int new_len) | ||
1068 | { | ||
1069 | unsigned int skip = ctx->skip, n = sg_nents(sg); | ||
1070 | struct scatterlist *tmp; | ||
1071 | unsigned int len; | ||
1072 | |||
1073 | if (ctx->bufcnt) | ||
1074 | n++; | ||
1075 | |||
1076 | ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); | ||
1077 | if (!ctx->sg) { | ||
1078 | ctx->error = true; | ||
1079 | return -ENOMEM; | ||
1080 | } | ||
1081 | |||
1082 | sg_init_table(ctx->sg, n); | ||
1083 | |||
1084 | tmp = ctx->sg; | ||
1085 | |||
1086 | ctx->sg_len = 0; | ||
1087 | |||
1088 | if (ctx->bufcnt) { | ||
1089 | sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); | ||
1090 | tmp = sg_next(tmp); | ||
1091 | ctx->sg_len++; | ||
1092 | } | ||
1093 | |||
1094 | while (sg && skip >= sg->length) { | ||
1095 | skip -= sg->length; | ||
1096 | sg = sg_next(sg); | ||
1097 | } | ||
1098 | |||
1099 | while (sg && new_len) { | ||
1100 | len = sg->length - skip; | ||
1101 | if (new_len < len) | ||
1102 | len = new_len; | ||
1103 | |||
1104 | new_len -= len; | ||
1105 | sg_set_page(tmp, sg_page(sg), len, sg->offset + skip); | ||
1106 | skip = 0; | ||
1107 | if (new_len <= 0) | ||
1108 | sg_mark_end(tmp); | ||
1109 | |||
1110 | tmp = sg_next(tmp); | ||
1111 | ctx->sg_len++; | ||
1112 | sg = sg_next(sg); | ||
1113 | } | ||
1114 | |||
1115 | set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags); | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1120 | /** | ||
1121 | * s5p_hash_prepare_sgs() - prepare sg for processing | ||
1122 | * @ctx: request context | ||
1123 | * @sg: source scatterlist request | ||
1124 | * @nbytes: number of bytes to process from sg | ||
1125 | * @final: final flag | ||
1126 | * | ||
1127 | * Check two conditions: (1) whether the sg buffers hold length-aligned | ||
1128 | * data, and (2) whether the sg table has well-aligned elements (list_ok). | ||
1129 | * If either check fails, then either (1) allocate a new buffer for the data | ||
1130 | * with s5p_hash_copy_sgs(), copy the data into it and prepare the request | ||
1131 | * in sgl, or (2) allocate a new sg table and prepare its elements. | ||
1132 | * | ||
1133 | * For digest or finup all conditions can be good, and we may not need any | ||
1134 | * fixes. | ||
1135 | */ | ||
1136 | static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx, | ||
1137 | struct scatterlist *sg, | ||
1138 | unsigned int new_len, bool final) | ||
1139 | { | ||
1140 | unsigned int skip = ctx->skip, nbytes = new_len, n = 0; | ||
1141 | bool aligned = true, list_ok = true; | ||
1142 | struct scatterlist *sg_tmp = sg; | ||
1143 | |||
1144 | if (!sg || !sg->length || !new_len) | ||
1145 | return 0; | ||
1146 | |||
1147 | if (skip || !final) | ||
1148 | list_ok = false; | ||
1149 | |||
1150 | while (nbytes > 0 && sg_tmp) { | ||
1151 | n++; | ||
1152 | if (skip >= sg_tmp->length) { | ||
1153 | skip -= sg_tmp->length; | ||
1154 | if (!sg_tmp->length) { | ||
1155 | aligned = false; | ||
1156 | break; | ||
1157 | } | ||
1158 | } else { | ||
1159 | if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) { | ||
1160 | aligned = false; | ||
1161 | break; | ||
1162 | } | ||
1163 | |||
1164 | if (nbytes < sg_tmp->length - skip) { | ||
1165 | list_ok = false; | ||
1166 | break; | ||
1167 | } | ||
1168 | |||
1169 | nbytes -= sg_tmp->length - skip; | ||
1170 | skip = 0; | ||
1171 | } | ||
1172 | |||
1173 | sg_tmp = sg_next(sg_tmp); | ||
1174 | } | ||
1175 | |||
1176 | if (!aligned) | ||
1177 | return s5p_hash_copy_sgs(ctx, sg, new_len); | ||
1178 | else if (!list_ok) | ||
1179 | return s5p_hash_copy_sg_lists(ctx, sg, new_len); | ||
1180 | |||
1181 | /* | ||
1182 | * Have aligned data from previous operation and/or current | ||
1183 | * Note: will enter here only if (digest or finup) and aligned | ||
1184 | */ | ||
1185 | if (ctx->bufcnt) { | ||
1186 | ctx->sg_len = n; | ||
1187 | sg_init_table(ctx->sgl, 2); | ||
1188 | sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt); | ||
1189 | sg_chain(ctx->sgl, 2, sg); | ||
1190 | ctx->sg = ctx->sgl; | ||
1191 | ctx->sg_len++; | ||
1192 | } else { | ||
1193 | ctx->sg = sg; | ||
1194 | ctx->sg_len = n; | ||
1195 | } | ||
1196 | |||
1197 | return 0; | ||
1198 | } | ||
1199 | |||
1200 | /** | ||
1201 | * s5p_hash_prepare_request() - prepare request for processing | ||
1202 | * @req: AHASH request | ||
1203 | * @update: true if UPDATE op | ||
1204 | * | ||
1205 | * Note 1: we can have update flag _and_ final flag at the same time. | ||
1206 | * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), when | ||
1207 | * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when | ||
1208 | * we have a final op | ||
1209 | */ | ||
1210 | static int s5p_hash_prepare_request(struct ahash_request *req, bool update) | ||
1211 | { | ||
1212 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1213 | bool final = ctx->finup; | ||
1214 | int xmit_len, hash_later, nbytes; | ||
1215 | int ret; | ||
1216 | |||
1217 | if (!req) | ||
1218 | return 0; | ||
1219 | |||
1220 | if (update) | ||
1221 | nbytes = req->nbytes; | ||
1222 | else | ||
1223 | nbytes = 0; | ||
1224 | |||
1225 | ctx->total = nbytes + ctx->bufcnt; | ||
1226 | if (!ctx->total) | ||
1227 | return 0; | ||
1228 | |||
1229 | if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) { | ||
1230 | /* bytes left from previous request, so fill up to BUFLEN */ | ||
1231 | int len = BUFLEN - ctx->bufcnt % BUFLEN; | ||
1232 | |||
1233 | if (len > nbytes) | ||
1234 | len = nbytes; | ||
1235 | |||
1236 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, | ||
1237 | 0, len, 0); | ||
1238 | ctx->bufcnt += len; | ||
1239 | nbytes -= len; | ||
1240 | ctx->skip = len; | ||
1241 | } else { | ||
1242 | ctx->skip = 0; | ||
1243 | } | ||
1244 | |||
1245 | if (ctx->bufcnt) | ||
1246 | memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt); | ||
1247 | |||
1248 | xmit_len = ctx->total; | ||
1249 | if (final) { | ||
1250 | hash_later = 0; | ||
1251 | } else { | ||
1252 | if (IS_ALIGNED(xmit_len, BUFLEN)) | ||
1253 | xmit_len -= BUFLEN; | ||
1254 | else | ||
1255 | xmit_len -= xmit_len & (BUFLEN - 1); | ||
1256 | |||
1257 | hash_later = ctx->total - xmit_len; | ||
1258 | /* copy hash_later bytes from end of req->src */ | ||
1259 | /* previous bytes are in xmit_buf, so no overwrite */ | ||
1260 | scatterwalk_map_and_copy(ctx->buffer, req->src, | ||
1261 | req->nbytes - hash_later, | ||
1262 | hash_later, 0); | ||
1263 | } | ||
1264 | |||
1265 | if (xmit_len > BUFLEN) { | ||
1266 | ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later, | ||
1267 | final); | ||
1268 | if (ret) | ||
1269 | return ret; | ||
1270 | } else { | ||
1271 | /* have buffered data only */ | ||
1272 | if (unlikely(!ctx->bufcnt)) { | ||
1273 | /* first update didn't fill up buffer */ | ||
1274 | scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src, | ||
1275 | 0, xmit_len, 0); | ||
1276 | } | ||
1277 | |||
1278 | sg_init_table(ctx->sgl, 1); | ||
1279 | sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len); | ||
1280 | |||
1281 | ctx->sg = ctx->sgl; | ||
1282 | ctx->sg_len = 1; | ||
1283 | } | ||
1284 | |||
1285 | ctx->bufcnt = hash_later; | ||
1286 | if (!final) | ||
1287 | ctx->total = xmit_len; | ||
1288 | |||
1289 | return 0; | ||
1290 | } | ||
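To make the xmit_len/hash_later split concrete, consider a non-final update with ctx->bufcnt == 0 and req->nbytes == 150 (BUFLEN == 64):

        unsigned int total = 150, xmit_len = total, hash_later;

        if (IS_ALIGNED(xmit_len, BUFLEN))               /* 150 is not */
                xmit_len -= BUFLEN;                     /* keep one block back */
        else
                xmit_len -= xmit_len & (BUFLEN - 1);    /* 150 - 22 = 128 */

        hash_later = total - xmit_len;                  /* 22 stay in buffer[] */

Note that for an exactly aligned total (say 128) a whole BUFLEN block is held back instead, so final() always has data left to feed the engine.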
1291 | |||
1292 | /** | ||
1293 | * s5p_hash_update_dma_stop() - unmap DMA | ||
1294 | * @dd: secss device | ||
1295 | * | ||
1296 | * Unmap scatterlist ctx->sg. | ||
1297 | */ | ||
1298 | static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd) | ||
1299 | { | ||
1300 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); | ||
1301 | |||
1302 | dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); | ||
1303 | clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); | ||
1304 | } | ||
1305 | |||
1306 | /** | ||
1307 | * s5p_hash_finish() - copy calculated digest to crypto layer | ||
1308 | * @req: AHASH request | ||
1309 | */ | ||
1310 | static void s5p_hash_finish(struct ahash_request *req) | ||
1311 | { | ||
1312 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1313 | struct s5p_aes_dev *dd = ctx->dd; | ||
1314 | |||
1315 | if (ctx->digcnt) | ||
1316 | s5p_hash_copy_result(req); | ||
1317 | |||
1318 | dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt); | ||
1319 | } | ||
1320 | |||
1321 | /** | ||
1322 | * s5p_hash_finish_req() - finish request | ||
1323 | * @req: AHASH request | ||
1324 | * @err: error | ||
1325 | */ | ||
1326 | static void s5p_hash_finish_req(struct ahash_request *req, int err) | ||
1327 | { | ||
1328 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1329 | struct s5p_aes_dev *dd = ctx->dd; | ||
1330 | unsigned long flags; | ||
1331 | |||
1332 | if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags)) | ||
1333 | free_pages((unsigned long)sg_virt(ctx->sg), | ||
1334 | get_order(ctx->sg->length)); | ||
1335 | |||
1336 | if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags)) | ||
1337 | kfree(ctx->sg); | ||
1338 | |||
1339 | ctx->sg = NULL; | ||
1340 | dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) | | ||
1341 | BIT(HASH_FLAGS_SGS_COPIED)); | ||
1342 | |||
1343 | if (!err && !ctx->error) { | ||
1344 | s5p_hash_read_msg(req); | ||
1345 | if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags)) | ||
1346 | s5p_hash_finish(req); | ||
1347 | } else { | ||
1348 | ctx->error = true; | ||
1349 | } | ||
1350 | |||
1351 | spin_lock_irqsave(&dd->hash_lock, flags); | ||
1352 | dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) | | ||
1353 | BIT(HASH_FLAGS_DMA_READY) | | ||
1354 | BIT(HASH_FLAGS_OUTPUT_READY)); | ||
1355 | spin_unlock_irqrestore(&dd->hash_lock, flags); | ||
1356 | |||
1357 | if (req->base.complete) | ||
1358 | req->base.complete(&req->base, err); | ||
1359 | } | ||
1360 | |||
1361 | /** | ||
1362 | * s5p_hash_handle_queue() - handle hash queue | ||
1363 | * @dd: device s5p_aes_dev | ||
1364 | * @req: AHASH request | ||
1365 | * | ||
1366 | * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not | ||
1367 | * set on the device, then process the first request from dd->hash_queue | ||
1368 | * | ||
1369 | * Returns: see s5p_hash_final below. | ||
1370 | */ | ||
1371 | static int s5p_hash_handle_queue(struct s5p_aes_dev *dd, | ||
1372 | struct ahash_request *req) | ||
1373 | { | ||
1374 | struct crypto_async_request *async_req, *backlog; | ||
1375 | struct s5p_hash_reqctx *ctx; | ||
1376 | unsigned long flags; | ||
1377 | int err = 0, ret = 0; | ||
1378 | |||
1379 | retry: | ||
1380 | spin_lock_irqsave(&dd->hash_lock, flags); | ||
1381 | if (req) | ||
1382 | ret = ahash_enqueue_request(&dd->hash_queue, req); | ||
1383 | |||
1384 | if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { | ||
1385 | spin_unlock_irqrestore(&dd->hash_lock, flags); | ||
1386 | return ret; | ||
1387 | } | ||
1388 | |||
1389 | backlog = crypto_get_backlog(&dd->hash_queue); | ||
1390 | async_req = crypto_dequeue_request(&dd->hash_queue); | ||
1391 | if (async_req) | ||
1392 | set_bit(HASH_FLAGS_BUSY, &dd->hash_flags); | ||
1393 | |||
1394 | spin_unlock_irqrestore(&dd->hash_lock, flags); | ||
1395 | |||
1396 | if (!async_req) | ||
1397 | return ret; | ||
1398 | |||
1399 | if (backlog) | ||
1400 | backlog->complete(backlog, -EINPROGRESS); | ||
1401 | |||
1402 | req = ahash_request_cast(async_req); | ||
1403 | dd->hash_req = req; | ||
1404 | ctx = ahash_request_ctx(req); | ||
1405 | |||
1406 | err = s5p_hash_prepare_request(req, ctx->op_update); | ||
1407 | if (err || !ctx->total) | ||
1408 | goto out; | ||
1409 | |||
1410 | dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n", | ||
1411 | ctx->op_update, req->nbytes); | ||
1412 | |||
1413 | s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT); | ||
1414 | if (ctx->digcnt) | ||
1415 | s5p_hash_write_iv(req); /* restore hash IV */ | ||
1416 | |||
1417 | if (ctx->op_update) { /* HASH_OP_UPDATE */ | ||
1418 | err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup); | ||
1419 | if (err != -EINPROGRESS && ctx->finup && !ctx->error) | ||
1420 | /* no final() after finup() */ | ||
1421 | err = s5p_hash_xmit_dma(dd, ctx->total, true); | ||
1422 | } else { /* HASH_OP_FINAL */ | ||
1423 | err = s5p_hash_xmit_dma(dd, ctx->total, true); | ||
1424 | } | ||
1425 | out: | ||
1426 | if (err != -EINPROGRESS) { | ||
1427 | /* hash_tasklet_cb will not finish it, so do it here */ | ||
1428 | s5p_hash_finish_req(req, err); | ||
1429 | req = NULL; | ||
1430 | |||
1431 | /* | ||
1432 | * Execute next request immediately if there is anything | ||
1433 | * in queue. | ||
1434 | */ | ||
1435 | goto retry; | ||
1436 | } | ||
1437 | |||
1438 | return ret; | ||
1439 | } | ||
1440 | |||
1441 | /** | ||
1442 | * s5p_hash_tasklet_cb() - hash tasklet | ||
1443 | * @data: ptr to s5p_aes_dev | ||
1444 | */ | ||
1445 | static void s5p_hash_tasklet_cb(unsigned long data) | ||
1446 | { | ||
1447 | struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data; | ||
1448 | |||
1449 | if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { | ||
1450 | s5p_hash_handle_queue(dd, NULL); | ||
1451 | return; | ||
1452 | } | ||
1453 | |||
1454 | if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) { | ||
1455 | if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE, | ||
1456 | &dd->hash_flags)) { | ||
1457 | s5p_hash_update_dma_stop(dd); | ||
1458 | } | ||
1459 | |||
1460 | if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY, | ||
1461 | &dd->hash_flags)) { | ||
1462 | /* hash or semi-hash ready */ | ||
1463 | clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); | ||
1464 | goto finish; | ||
1465 | } | ||
1466 | } | ||
1467 | |||
1468 | return; | ||
1469 | |||
1470 | finish: | ||
1471 | /* finish current request */ | ||
1472 | s5p_hash_finish_req(dd->hash_req, 0); | ||
1473 | |||
1474 | /* If we are not busy, process next req */ | ||
1475 | if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) | ||
1476 | s5p_hash_handle_queue(dd, NULL); | ||
1477 | } | ||
1478 | |||
1479 | /** | ||
1480 | * s5p_hash_enqueue() - enqueue request | ||
1481 | * @req: AHASH request | ||
1482 | * @op: operation UPDATE (true) or FINAL (false) | ||
1483 | * | ||
1484 | * Returns: see s5p_hash_final below. | ||
1485 | */ | ||
1486 | static int s5p_hash_enqueue(struct ahash_request *req, bool op) | ||
1487 | { | ||
1488 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1489 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
1490 | |||
1491 | ctx->op_update = op; | ||
1492 | |||
1493 | return s5p_hash_handle_queue(tctx->dd, req); | ||
1494 | } | ||
1495 | |||
1496 | /** | ||
1497 | * s5p_hash_update() - process the hash input data | ||
1498 | * @req: AHASH request | ||
1499 | * | ||
1500 | * If the request fits in the buffer, copy it and return immediately; | ||
1501 | * else enqueue it with OP_UPDATE. | ||
1502 | * | ||
1503 | * Returns: see s5p_hash_final below. | ||
1504 | */ | ||
1505 | static int s5p_hash_update(struct ahash_request *req) | ||
1506 | { | ||
1507 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1508 | |||
1509 | if (!req->nbytes) | ||
1510 | return 0; | ||
1511 | |||
1512 | if (ctx->bufcnt + req->nbytes <= BUFLEN) { | ||
1513 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, | ||
1514 | 0, req->nbytes, 0); | ||
1515 | ctx->bufcnt += req->nbytes; | ||
1516 | return 0; | ||
1517 | } | ||
1518 | |||
1519 | return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */ | ||
1520 | } | ||
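For example, two consecutive 40-byte updates behave differently: the first fits (ctx->bufcnt becomes 40) and returns 0 without touching the HW; the second sees 40 + 40 > BUFLEN and goes through s5p_hash_enqueue(), which may return -EINPROGRESS once the request is queued for the engine.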
1521 | |||
1522 | /** | ||
1523 | * s5p_hash_shash_digest() - calculate shash digest | ||
1524 | * @tfm: crypto transformation | ||
1525 | * @flags: tfm flags | ||
1526 | * @data: input data | ||
1527 | * @len: length of data | ||
1528 | * @out: output buffer | ||
1529 | */ | ||
1530 | static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags, | ||
1531 | const u8 *data, unsigned int len, u8 *out) | ||
1532 | { | ||
1533 | SHASH_DESC_ON_STACK(shash, tfm); | ||
1534 | |||
1535 | shash->tfm = tfm; | ||
1536 | shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
1537 | |||
1538 | return crypto_shash_digest(shash, data, len, out); | ||
1539 | } | ||
1540 | |||
1541 | /** | ||
1542 | * s5p_hash_final_shash() - calculate shash digest | ||
1543 | * @req: AHASH request | ||
1544 | */ | ||
1545 | static int s5p_hash_final_shash(struct ahash_request *req) | ||
1546 | { | ||
1547 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
1548 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1549 | |||
1550 | return s5p_hash_shash_digest(tctx->fallback, req->base.flags, | ||
1551 | ctx->buffer, ctx->bufcnt, req->result); | ||
1552 | } | ||
1553 | |||
1554 | /** | ||
1555 | * s5p_hash_final() - close up hash and calculate digest | ||
1556 | * @req: AHASH request | ||
1557 | * | ||
1558 | * Note: in final, req->src does not hold any data, yet req->nbytes can be | ||
1559 | * non-zero. | ||
1560 | * | ||
1561 | * If there were no input data processed yet and the buffered hash data is | ||
1562 | * less than BUFLEN (64) then calculate the final hash immediately by using | ||
1563 | * SW algorithm fallback. | ||
1564 | * | ||
1565 | * Otherwise enqueue the current AHASH request with the OP_FINAL operation | ||
1566 | * and finalize the hash message in HW. Note that if digcnt!=0 then there | ||
1567 | * was a previous update op, so there are always some buffered bytes in | ||
1568 | * ctx->buffer, which means that ctx->bufcnt!=0 | ||
1569 | * | ||
1570 | * Returns: | ||
1571 | * 0 if the request has been processed immediately, | ||
1572 | * -EINPROGRESS if the operation has been queued for later execution or is set | ||
1573 | * to processing by HW, | ||
1574 | * -EBUSY if queue is full and request should be resubmitted later, | ||
1575 | * other negative values denote an error. | ||
1576 | */ | ||
1577 | static int s5p_hash_final(struct ahash_request *req) | ||
1578 | { | ||
1579 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1580 | |||
1581 | ctx->finup = true; | ||
1582 | if (ctx->error) | ||
1583 | return -EINVAL; /* uncompleted hash is not needed */ | ||
1584 | |||
1585 | if (!ctx->digcnt && ctx->bufcnt < BUFLEN) | ||
1586 | return s5p_hash_final_shash(req); | ||
1587 | |||
1588 | return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */ | ||
1589 | } | ||
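The return-value contract above is the standard async-hash one, so a caller can fold it into the crypto_wait helper introduced in this same merge. A minimal caller sketch (not this driver's code; the function name is illustrative), hashing one buffer through the async "sha256" implementation:

        #include <crypto/hash.h>
        #include <linux/scatterlist.h>

        static int sha256_via_ahash(const u8 *data, unsigned int len, u8 *out)
        {
                struct crypto_ahash *tfm;
                struct ahash_request *req;
                struct scatterlist sg;
                DECLARE_CRYPTO_WAIT(wait);
                int err;

                tfm = crypto_alloc_ahash("sha256", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                req = ahash_request_alloc(tfm, GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
                        goto out_tfm;
                }

                sg_init_one(&sg, data, len);
                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           crypto_req_done, &wait);
                ahash_request_set_crypt(req, &sg, out, len);

                /* waits out both -EINPROGRESS and -EBUSY (backlogged) */
                err = crypto_wait_req(crypto_ahash_digest(req), &wait);

                ahash_request_free(req);
        out_tfm:
                crypto_free_ahash(tfm);
                return err;
        }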
1590 | |||
1591 | /** | ||
1592 | * s5p_hash_finup() - process last req->src and calculate digest | ||
1593 | * @req: AHASH request containing the last update data | ||
1594 | * | ||
1595 | * Return values: see s5p_hash_final above. | ||
1596 | */ | ||
1597 | static int s5p_hash_finup(struct ahash_request *req) | ||
1598 | { | ||
1599 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1600 | int err1, err2; | ||
1601 | |||
1602 | ctx->finup = true; | ||
1603 | |||
1604 | err1 = s5p_hash_update(req); | ||
1605 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | ||
1606 | return err1; | ||
1607 | |||
1608 | /* | ||
1609 | * final() always has to be called to clean up resources, even if | ||
1610 | * update() failed (except for EINPROGRESS), or to calculate the digest | ||
1611 | * for a small size | ||
1612 | */ | ||
1613 | err2 = s5p_hash_final(req); | ||
1614 | |||
1615 | return err1 ?: err2; | ||
1616 | } | ||
1617 | |||
1618 | /** | ||
1619 | * s5p_hash_init() - initialize AHASH request context | ||
1620 | * @req: AHASH request | ||
1621 | * | ||
1622 | * Init async hash request context. | ||
1623 | */ | ||
1624 | static int s5p_hash_init(struct ahash_request *req) | ||
1625 | { | ||
1626 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1627 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1628 | struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); | ||
1629 | |||
1630 | ctx->dd = tctx->dd; | ||
1631 | ctx->error = false; | ||
1632 | ctx->finup = false; | ||
1633 | ctx->bufcnt = 0; | ||
1634 | ctx->digcnt = 0; | ||
1635 | ctx->total = 0; | ||
1636 | ctx->skip = 0; | ||
1637 | |||
1638 | dev_dbg(tctx->dd->dev, "init: digest size: %d\n", | ||
1639 | crypto_ahash_digestsize(tfm)); | ||
1640 | |||
1641 | switch (crypto_ahash_digestsize(tfm)) { | ||
1642 | case MD5_DIGEST_SIZE: | ||
1643 | ctx->engine = SSS_HASH_ENGINE_MD5; | ||
1644 | ctx->nregs = HASH_MD5_MAX_REG; | ||
1645 | break; | ||
1646 | case SHA1_DIGEST_SIZE: | ||
1647 | ctx->engine = SSS_HASH_ENGINE_SHA1; | ||
1648 | ctx->nregs = HASH_SHA1_MAX_REG; | ||
1649 | break; | ||
1650 | case SHA256_DIGEST_SIZE: | ||
1651 | ctx->engine = SSS_HASH_ENGINE_SHA256; | ||
1652 | ctx->nregs = HASH_SHA256_MAX_REG; | ||
1653 | break; | ||
1654 | default: | ||
1655 | ctx->error = true; | ||
1656 | return -EINVAL; | ||
1657 | } | ||
1658 | |||
1659 | return 0; | ||
1660 | } | ||
1661 | |||
1662 | /** | ||
1663 | * s5p_hash_digest - calculate digest from req->src | ||
1664 | * @req: AHASH request | ||
1665 | * | ||
1666 | * Return values: see s5p_hash_final above. | ||
1667 | */ | ||
1668 | static int s5p_hash_digest(struct ahash_request *req) | ||
1669 | { | ||
1670 | return s5p_hash_init(req) ?: s5p_hash_finup(req); | ||
1671 | } | ||
1672 | |||
1673 | /** | ||
1674 | * s5p_hash_cra_init_alg - init crypto alg transformation | ||
1675 | * @tfm: crypto transformation | ||
1676 | */ | ||
1677 | static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm) | ||
1678 | { | ||
1679 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); | ||
1680 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
1681 | |||
1682 | tctx->dd = s5p_dev; | ||
1683 | /* Allocate a fallback and abort if it failed. */ | ||
1684 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
1685 | CRYPTO_ALG_NEED_FALLBACK); | ||
1686 | if (IS_ERR(tctx->fallback)) { | ||
1687 | pr_err("fallback alloc fails for '%s'\n", alg_name); | ||
1688 | return PTR_ERR(tctx->fallback); | ||
1689 | } | ||
1690 | |||
1691 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1692 | sizeof(struct s5p_hash_reqctx) + BUFLEN); | ||
1693 | |||
1694 | return 0; | ||
1695 | } | ||
1696 | |||
1697 | /** | ||
1698 | * s5p_hash_cra_init - init crypto tfm | ||
1699 | * @tfm: crypto transformation | ||
1700 | */ | ||
1701 | static int s5p_hash_cra_init(struct crypto_tfm *tfm) | ||
1702 | { | ||
1703 | return s5p_hash_cra_init_alg(tfm); | ||
1704 | } | ||
1705 | |||
1706 | /** | ||
1707 | * s5p_hash_cra_exit - exit crypto tfm | ||
1708 | * @tfm: crypto transformation | ||
1709 | * | ||
1710 | * free allocated fallback | ||
1711 | */ | ||
1712 | static void s5p_hash_cra_exit(struct crypto_tfm *tfm) | ||
1713 | { | ||
1714 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); | ||
1715 | |||
1716 | crypto_free_shash(tctx->fallback); | ||
1717 | tctx->fallback = NULL; | ||
1718 | } | ||
1719 | |||
1720 | /** | ||
1721 | * s5p_hash_export - export hash state | ||
1722 | * @req: AHASH request | ||
1723 | * @out: buffer for exported state | ||
1724 | */ | ||
1725 | static int s5p_hash_export(struct ahash_request *req, void *out) | ||
1726 | { | ||
1727 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1728 | |||
1729 | memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt); | ||
1730 | |||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | /** | ||
1735 | * s5p_hash_import - import hash state | ||
1736 | * @req: AHASH request | ||
1737 | * @in: buffer with state to be imported from | ||
1738 | */ | ||
1739 | static int s5p_hash_import(struct ahash_request *req, const void *in) | ||
1740 | { | ||
1741 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); | ||
1742 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1743 | struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); | ||
1744 | const struct s5p_hash_reqctx *ctx_in = in; | ||
1745 | |||
1746 | memcpy(ctx, in, sizeof(*ctx) + BUFLEN); | ||
1747 | if (ctx_in->bufcnt > BUFLEN) { | ||
1748 | ctx->error = true; | ||
1749 | return -EINVAL; | ||
1750 | } | ||
1751 | |||
1752 | ctx->dd = tctx->dd; | ||
1753 | ctx->error = false; | ||
1754 | |||
1755 | return 0; | ||
1756 | } | ||
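A caller saves and restores partial state through the generic wrappers; a brief sketch (the buffer handling is illustrative):

        u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

        if (!state)
                return -ENOMEM;
        err = crypto_ahash_export(req, state);          /* snapshot after update() */
        if (!err)
                err = crypto_ahash_import(req2, state); /* resume on another request */
        kfree(state);

Since the exported blob is sizeof(struct s5p_hash_reqctx) plus up to BUFLEN staged bytes, halg.statesize below is set to exactly that sum.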
1757 | |||
1758 | static struct ahash_alg algs_sha1_md5_sha256[] = { | ||
1759 | { | ||
1760 | .init = s5p_hash_init, | ||
1761 | .update = s5p_hash_update, | ||
1762 | .final = s5p_hash_final, | ||
1763 | .finup = s5p_hash_finup, | ||
1764 | .digest = s5p_hash_digest, | ||
1765 | .export = s5p_hash_export, | ||
1766 | .import = s5p_hash_import, | ||
1767 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, | ||
1768 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
1769 | .halg.base = { | ||
1770 | .cra_name = "sha1", | ||
1771 | .cra_driver_name = "exynos-sha1", | ||
1772 | .cra_priority = 100, | ||
1773 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1774 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
1775 | CRYPTO_ALG_ASYNC | | ||
1776 | CRYPTO_ALG_NEED_FALLBACK, | ||
1777 | .cra_blocksize = HASH_BLOCK_SIZE, | ||
1778 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), | ||
1779 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, | ||
1780 | .cra_module = THIS_MODULE, | ||
1781 | .cra_init = s5p_hash_cra_init, | ||
1782 | .cra_exit = s5p_hash_cra_exit, | ||
1783 | } | ||
1784 | }, | ||
1785 | { | ||
1786 | .init = s5p_hash_init, | ||
1787 | .update = s5p_hash_update, | ||
1788 | .final = s5p_hash_final, | ||
1789 | .finup = s5p_hash_finup, | ||
1790 | .digest = s5p_hash_digest, | ||
1791 | .export = s5p_hash_export, | ||
1792 | .import = s5p_hash_import, | ||
1793 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, | ||
1794 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
1795 | .halg.base = { | ||
1796 | .cra_name = "md5", | ||
1797 | .cra_driver_name = "exynos-md5", | ||
1798 | .cra_priority = 100, | ||
1799 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1800 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
1801 | CRYPTO_ALG_ASYNC | | ||
1802 | CRYPTO_ALG_NEED_FALLBACK, | ||
1803 | .cra_blocksize = HASH_BLOCK_SIZE, | ||
1804 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), | ||
1805 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, | ||
1806 | .cra_module = THIS_MODULE, | ||
1807 | .cra_init = s5p_hash_cra_init, | ||
1808 | .cra_exit = s5p_hash_cra_exit, | ||
1809 | } | ||
1810 | }, | ||
1811 | { | ||
1812 | .init = s5p_hash_init, | ||
1813 | .update = s5p_hash_update, | ||
1814 | .final = s5p_hash_final, | ||
1815 | .finup = s5p_hash_finup, | ||
1816 | .digest = s5p_hash_digest, | ||
1817 | .export = s5p_hash_export, | ||
1818 | .import = s5p_hash_import, | ||
1819 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, | ||
1820 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1821 | .halg.base = { | ||
1822 | .cra_name = "sha256", | ||
1823 | .cra_driver_name = "exynos-sha256", | ||
1824 | .cra_priority = 100, | ||
1825 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1826 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
1827 | CRYPTO_ALG_ASYNC | | ||
1828 | CRYPTO_ALG_NEED_FALLBACK, | ||
1829 | .cra_blocksize = HASH_BLOCK_SIZE, | ||
1830 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), | ||
1831 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, | ||
1832 | .cra_module = THIS_MODULE, | ||
1833 | .cra_init = s5p_hash_cra_init, | ||
1834 | .cra_exit = s5p_hash_cra_exit, | ||
1835 | } | ||
1836 | } | ||
1837 | |||
1838 | }; | ||
1839 | |||
515 | static void s5p_set_aes(struct s5p_aes_dev *dev, | 1840 | static void s5p_set_aes(struct s5p_aes_dev *dev, |
516 | uint8_t *key, uint8_t *iv, unsigned int keylen) | 1841 | uint8_t *key, uint8_t *iv, unsigned int keylen) |
517 | { | 1842 | { |
@@ -829,6 +2154,7 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
829 | struct samsung_aes_variant *variant; | 2154 | struct samsung_aes_variant *variant; |
830 | struct s5p_aes_dev *pdata; | 2155 | struct s5p_aes_dev *pdata; |
831 | struct resource *res; | 2156 | struct resource *res; |
2157 | unsigned int hash_i; | ||
832 | 2158 | ||
833 | if (s5p_dev) | 2159 | if (s5p_dev) |
834 | return -EEXIST; | 2160 | return -EEXIST; |
@@ -837,12 +2163,34 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
837 | if (!pdata) | 2163 | if (!pdata) |
838 | return -ENOMEM; | 2164 | return -ENOMEM; |
839 | 2165 | ||
2166 | variant = find_s5p_sss_version(pdev); | ||
840 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
841 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); | ||
842 | if (IS_ERR(pdata->ioaddr)) | ||
843 | return PTR_ERR(pdata->ioaddr); | ||
844 | 2168 | ||
845 | variant = find_s5p_sss_version(pdev); | 2169 | /* |
2170 | * Note: HASH and PRNG use the same registers in secss and must not | ||
2171 | * overwrite each other. HASH is therefore dropped when CONFIG_EXYNOS_RNG | ||
2172 | * is enabled. The HASH registers also need a larger region in secss; | ||
2173 | * the current resource describes only AES/DES. | ||
2174 | */ | ||
2175 | if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) { | ||
2176 | if (variant == &exynos_aes_data) { | ||
2177 | res->end += 0x300; | ||
2178 | pdata->use_hash = true; | ||
2179 | } | ||
2180 | } | ||
2181 | |||
2182 | pdata->res = res; | ||
2183 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); | ||
2184 | if (IS_ERR(pdata->ioaddr)) { | ||
2185 | if (!pdata->use_hash) | ||
2186 | return PTR_ERR(pdata->ioaddr); | ||
2187 | /* try AES without HASH */ | ||
2188 | res->end -= 0x300; | ||
2189 | pdata->use_hash = false; | ||
2190 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); | ||
2191 | if (IS_ERR(pdata->ioaddr)) | ||
2192 | return PTR_ERR(pdata->ioaddr); | ||
2193 | } | ||
846 | 2194 | ||
847 | pdata->clk = devm_clk_get(dev, "secss"); | 2195 | pdata->clk = devm_clk_get(dev, "secss"); |
848 | if (IS_ERR(pdata->clk)) { | 2196 | if (IS_ERR(pdata->clk)) { |
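The probe path above first tries to map an enlarged MEM resource (AES/DES plus the 0x300-byte HASH bank) and falls back to the original window only when that fails, e.g. because the Exynos RNG driver already claimed part of the range. Distilled to the bare pattern (a sketch reusing the names from the hunk above)::

    /* Sketch: opportunistic mapping of an extended register window. */
    res->end += 0x300;                     /* cover the HASH registers too */
    pdata->use_hash = true;
    pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(pdata->ioaddr)) {
            res->end -= 0x300;             /* retry: AES/DES only */
            pdata->use_hash = false;
            pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(pdata->ioaddr))
                    return PTR_ERR(pdata->ioaddr);
    }
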
@@ -857,8 +2205,10 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
857 | } | 2205 | } |
858 | 2206 | ||
859 | spin_lock_init(&pdata->lock); | 2207 | spin_lock_init(&pdata->lock); |
2208 | spin_lock_init(&pdata->hash_lock); | ||
860 | 2209 | ||
861 | pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; | 2210 | pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; |
2211 | pdata->io_hash_base = pdata->ioaddr + variant->hash_offset; | ||
862 | 2212 | ||
863 | pdata->irq_fc = platform_get_irq(pdev, 0); | 2213 | pdata->irq_fc = platform_get_irq(pdev, 0); |
864 | if (pdata->irq_fc < 0) { | 2214 | if (pdata->irq_fc < 0) { |
@@ -888,12 +2238,40 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
888 | goto err_algs; | 2238 | goto err_algs; |
889 | } | 2239 | } |
890 | 2240 | ||
2241 | if (pdata->use_hash) { | ||
2242 | tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb, | ||
2243 | (unsigned long)pdata); | ||
2244 | crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH); | ||
2245 | |||
2246 | for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256); | ||
2247 | hash_i++) { | ||
2248 | struct ahash_alg *alg; | ||
2249 | |||
2250 | alg = &algs_sha1_md5_sha256[hash_i]; | ||
2251 | err = crypto_register_ahash(alg); | ||
2252 | if (err) { | ||
2253 | dev_err(dev, "can't register '%s': %d\n", | ||
2254 | alg->halg.base.cra_driver_name, err); | ||
2255 | goto err_hash; | ||
2256 | } | ||
2257 | } | ||
2258 | } | ||
2259 | |||
891 | dev_info(dev, "s5p-sss driver registered\n"); | 2260 | dev_info(dev, "s5p-sss driver registered\n"); |
892 | 2261 | ||
893 | return 0; | 2262 | return 0; |
894 | 2263 | ||
2264 | err_hash: | ||
2265 | for (j = hash_i - 1; j >= 0; j--) | ||
2266 | crypto_unregister_ahash(&algs_sha1_md5_sha256[j]); | ||
2267 | |||
2268 | tasklet_kill(&pdata->hash_tasklet); | ||
2269 | res->end -= 0x300; | ||
2270 | |||
895 | err_algs: | 2271 | err_algs: |
896 | dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err); | 2272 | if (i < ARRAY_SIZE(algs)) |
2273 | dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, | ||
2274 | err); | ||
897 | 2275 | ||
898 | for (j = 0; j < i; j++) | 2276 | for (j = 0; j < i; j++) |
899 | crypto_unregister_alg(&algs[j]); | 2277 | crypto_unregister_alg(&algs[j]); |
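The registration loop above unwinds in the usual way: only the algorithms that were successfully registered get unregistered, in reverse order. As a generic sketch (my_ahash_algs is a placeholder array, not a name from this driver)::

    /* Sketch: register an array of ahash algorithms with rollback. */
    int i, err;

    for (i = 0; i < ARRAY_SIZE(my_ahash_algs); i++) {
            err = crypto_register_ahash(&my_ahash_algs[i]);
            if (err)
                    goto unregister;
    }
    return 0;

    unregister:
    while (i-- > 0)
            crypto_unregister_ahash(&my_ahash_algs[i]);
    return err;
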
@@ -920,9 +2298,16 @@ static int s5p_aes_remove(struct platform_device *pdev) | |||
920 | crypto_unregister_alg(&algs[i]); | 2298 | crypto_unregister_alg(&algs[i]); |
921 | 2299 | ||
922 | tasklet_kill(&pdata->tasklet); | 2300 | tasklet_kill(&pdata->tasklet); |
2301 | if (pdata->use_hash) { | ||
2302 | for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--) | ||
2303 | crypto_unregister_ahash(&algs_sha1_md5_sha256[i]); | ||
923 | 2304 | ||
924 | clk_disable_unprepare(pdata->clk); | 2305 | pdata->res->end -= 0x300; |
2306 | tasklet_kill(&pdata->hash_tasklet); | ||
2307 | pdata->use_hash = false; | ||
2308 | } | ||
925 | 2309 | ||
2310 | clk_disable_unprepare(pdata->clk); | ||
926 | s5p_dev = NULL; | 2311 | s5p_dev = NULL; |
927 | 2312 | ||
928 | return 0; | 2313 | return 0; |
@@ -942,3 +2327,4 @@ module_platform_driver(s5p_aes_crypto); | |||
942 | MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); | 2327 | MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); |
943 | MODULE_LICENSE("GPL v2"); | 2328 | MODULE_LICENSE("GPL v2"); |
944 | MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); | 2329 | MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); |
2330 | MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>"); | ||
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 4835dd4a9e50..4ca4a264a833 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
@@ -895,7 +895,6 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op) | |||
895 | static int stm32_hash_update(struct ahash_request *req) | 895 | static int stm32_hash_update(struct ahash_request *req) |
896 | { | 896 | { |
897 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); | 897 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); |
898 | int ret; | ||
899 | 898 | ||
900 | if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU)) | 899 | if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU)) |
901 | return 0; | 900 | return 0; |
@@ -909,12 +908,7 @@ static int stm32_hash_update(struct ahash_request *req) | |||
909 | return 0; | 908 | return 0; |
910 | } | 909 | } |
911 | 910 | ||
912 | ret = stm32_hash_enqueue(req, HASH_OP_UPDATE); | 911 | return stm32_hash_enqueue(req, HASH_OP_UPDATE); |
913 | |||
914 | if (rctx->flags & HASH_FLAGS_FINUP) | ||
915 | return ret; | ||
916 | |||
917 | return 0; | ||
918 | } | 912 | } |
919 | 913 | ||
920 | static int stm32_hash_final(struct ahash_request *req) | 914 | static int stm32_hash_final(struct ahash_request *req) |
@@ -1070,7 +1064,6 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm) | |||
1070 | static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) | 1064 | static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) |
1071 | { | 1065 | { |
1072 | struct stm32_hash_dev *hdev = dev_id; | 1066 | struct stm32_hash_dev *hdev = dev_id; |
1073 | int err; | ||
1074 | 1067 | ||
1075 | if (HASH_FLAGS_CPU & hdev->flags) { | 1068 | if (HASH_FLAGS_CPU & hdev->flags) { |
1076 | if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { | 1069 | if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { |
@@ -1087,8 +1080,8 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) | |||
1087 | return IRQ_HANDLED; | 1080 | return IRQ_HANDLED; |
1088 | 1081 | ||
1089 | finish: | 1082 | finish: |
1090 | /*Finish current request */ | 1083 | /* Finish current request */ |
1091 | stm32_hash_finish_req(hdev->req, err); | 1084 | stm32_hash_finish_req(hdev->req, 0); |
1092 | 1085 | ||
1093 | return IRQ_HANDLED; | 1086 | return IRQ_HANDLED; |
1094 | } | 1087 | } |
@@ -1411,11 +1404,10 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match); | |||
1411 | static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, | 1404 | static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, |
1412 | struct device *dev) | 1405 | struct device *dev) |
1413 | { | 1406 | { |
1414 | const struct of_device_id *match; | ||
1415 | int err; | 1407 | int err; |
1416 | 1408 | ||
1417 | match = of_match_device(stm32_hash_of_match, dev); | 1409 | hdev->pdata = of_device_get_match_data(dev); |
1418 | if (!match) { | 1410 | if (!hdev->pdata) { |
1419 | dev_err(dev, "no compatible OF match\n"); | 1411 | dev_err(dev, "no compatible OF match\n"); |
1420 | return -EINVAL; | 1412 | return -EINVAL; |
1421 | } | 1413 | } |
@@ -1423,8 +1415,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, | |||
1423 | err = of_property_read_u32(dev->of_node, "dma-maxburst", | 1415 | err = of_property_read_u32(dev->of_node, "dma-maxburst", |
1424 | &hdev->dma_maxburst); | 1416 | &hdev->dma_maxburst); |
1425 | 1417 | ||
1426 | hdev->pdata = match->data; | ||
1427 | |||
1428 | return err; | 1418 | return err; |
1429 | } | 1419 | } |
1430 | 1420 | ||
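The stm32 cleanup above swaps the of_match_device()/match->data pair for of_device_get_match_data(), which returns the matched table entry's .data directly, or NULL when nothing matched. A minimal probe-side sketch; the compatible string and pdata struct are illustrative, not taken from the driver::

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_hash_pdata { bool has_dma; };        /* illustrative */

    static int my_hash_probe(struct platform_device *pdev)
    {
            const struct my_hash_pdata *pdata;

            pdata = of_device_get_match_data(&pdev->dev);
            if (!pdata)
                    return -EINVAL;        /* no compatible OF match */
            /* ... use pdata->has_dma ... */
            return 0;
    }
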
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dff88838dce7..9c80e0cb1664 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -56,29 +56,26 @@ | |||
56 | #include "talitos.h" | 56 | #include "talitos.h" |
57 | 57 | ||
58 | static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, | 58 | static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, |
59 | bool is_sec1) | 59 | unsigned int len, bool is_sec1) |
60 | { | 60 | { |
61 | ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | 61 | ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); |
62 | if (!is_sec1) | 62 | if (is_sec1) { |
63 | ptr->len1 = cpu_to_be16(len); | ||
64 | } else { | ||
65 | ptr->len = cpu_to_be16(len); | ||
63 | ptr->eptr = upper_32_bits(dma_addr); | 66 | ptr->eptr = upper_32_bits(dma_addr); |
67 | } | ||
64 | } | 68 | } |
65 | 69 | ||
66 | static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, | 70 | static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, |
67 | struct talitos_ptr *src_ptr, bool is_sec1) | 71 | struct talitos_ptr *src_ptr, bool is_sec1) |
68 | { | 72 | { |
69 | dst_ptr->ptr = src_ptr->ptr; | 73 | dst_ptr->ptr = src_ptr->ptr; |
70 | if (!is_sec1) | ||
71 | dst_ptr->eptr = src_ptr->eptr; | ||
72 | } | ||
73 | |||
74 | static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, | ||
75 | bool is_sec1) | ||
76 | { | ||
77 | if (is_sec1) { | 74 | if (is_sec1) { |
78 | ptr->res = 0; | 75 | dst_ptr->len1 = src_ptr->len1; |
79 | ptr->len1 = cpu_to_be16(len); | ||
80 | } else { | 76 | } else { |
81 | ptr->len = cpu_to_be16(len); | 77 | dst_ptr->len = src_ptr->len; |
78 | dst_ptr->eptr = src_ptr->eptr; | ||
82 | } | 79 | } |
83 | } | 80 | } |
84 | 81 | ||
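The merged to_talitos_ptr() above can set the length in the same call because SEC1 and SEC2+ overlay different length fields on the same 8-byte hardware pointer; roughly, per the union in drivers/crypto/talitos.h (a sketch of the layout, field names from that header)::

    struct talitos_ptr {
            union {
                    struct {               /* SEC2+ format */
                            __be16 len;
                            u8 j_extent;   /* jump/extent bits */
                            u8 eptr;       /* address bits 32..39 */
                    };
                    struct {               /* SEC1 format */
                            __be16 res;
                            __be16 len1;
                    };
            };
            __be32 ptr;                    /* low 32 address bits */
    };
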
@@ -116,9 +113,7 @@ static void map_single_talitos_ptr(struct device *dev, | |||
116 | struct talitos_private *priv = dev_get_drvdata(dev); | 113 | struct talitos_private *priv = dev_get_drvdata(dev); |
117 | bool is_sec1 = has_ftr_sec1(priv); | 114 | bool is_sec1 = has_ftr_sec1(priv); |
118 | 115 | ||
119 | to_talitos_ptr_len(ptr, len, is_sec1); | 116 | to_talitos_ptr(ptr, dma_addr, len, is_sec1); |
120 | to_talitos_ptr(ptr, dma_addr, is_sec1); | ||
121 | to_talitos_ptr_ext_set(ptr, 0, is_sec1); | ||
122 | } | 117 | } |
123 | 118 | ||
124 | /* | 119 | /* |
@@ -165,6 +160,10 @@ static int reset_channel(struct device *dev, int ch) | |||
165 | /* set 36-bit addressing, done writeback enable and done IRQ enable */ | 160 | /* set 36-bit addressing, done writeback enable and done IRQ enable */ |
166 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE | | 161 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE | |
167 | TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); | 162 | TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); |
163 | /* enable chaining descriptors */ | ||
164 | if (is_sec1) | ||
165 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, | ||
166 | TALITOS_CCCR_LO_NE); | ||
168 | 167 | ||
169 | /* and ICCR writeback, if available */ | 168 | /* and ICCR writeback, if available */ |
170 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | 169 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) |
@@ -287,7 +286,6 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
287 | /* map descriptor and save caller data */ | 286 | /* map descriptor and save caller data */ |
288 | if (is_sec1) { | 287 | if (is_sec1) { |
289 | desc->hdr1 = desc->hdr; | 288 | desc->hdr1 = desc->hdr; |
290 | desc->next_desc = 0; | ||
291 | request->dma_desc = dma_map_single(dev, &desc->hdr1, | 289 | request->dma_desc = dma_map_single(dev, &desc->hdr1, |
292 | TALITOS_DESC_SIZE, | 290 | TALITOS_DESC_SIZE, |
293 | DMA_BIDIRECTIONAL); | 291 | DMA_BIDIRECTIONAL); |
@@ -339,7 +337,12 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
339 | 337 | ||
340 | /* descriptors with their done bits set don't get the error */ | 338 | /* descriptors with their done bits set don't get the error */ |
341 | rmb(); | 339 | rmb(); |
342 | hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr; | 340 | if (!is_sec1) |
341 | hdr = request->desc->hdr; | ||
342 | else if (request->desc->next_desc) | ||
343 | hdr = (request->desc + 1)->hdr1; | ||
344 | else | ||
345 | hdr = request->desc->hdr1; | ||
343 | 346 | ||
344 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) | 347 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) |
345 | status = 0; | 348 | status = 0; |
@@ -393,8 +396,6 @@ static void talitos1_done_##name(unsigned long data) \ | |||
393 | \ | 396 | \ |
394 | if (ch_done_mask & 0x10000000) \ | 397 | if (ch_done_mask & 0x10000000) \ |
395 | flush_channel(dev, 0, 0, 0); \ | 398 | flush_channel(dev, 0, 0, 0); \ |
396 | if (priv->num_channels == 1) \ | ||
397 | goto out; \ | ||
398 | if (ch_done_mask & 0x40000000) \ | 399 | if (ch_done_mask & 0x40000000) \ |
399 | flush_channel(dev, 1, 0, 0); \ | 400 | flush_channel(dev, 1, 0, 0); \ |
400 | if (ch_done_mask & 0x00010000) \ | 401 | if (ch_done_mask & 0x00010000) \ |
@@ -402,7 +403,6 @@ static void talitos1_done_##name(unsigned long data) \ | |||
402 | if (ch_done_mask & 0x00040000) \ | 403 | if (ch_done_mask & 0x00040000) \ |
403 | flush_channel(dev, 3, 0, 0); \ | 404 | flush_channel(dev, 3, 0, 0); \ |
404 | \ | 405 | \ |
405 | out: \ | ||
406 | /* At this point, all completed channels have been processed */ \ | 406 | /* At this point, all completed channels have been processed */ \ |
407 | /* Unmask done interrupts for channels completed later on. */ \ | 407 | /* Unmask done interrupts for channels completed later on. */ \ |
408 | spin_lock_irqsave(&priv->reg_lock, flags); \ | 408 | spin_lock_irqsave(&priv->reg_lock, flags); \ |
@@ -412,6 +412,7 @@ out: \ | |||
412 | } | 412 | } |
413 | 413 | ||
414 | DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) | 414 | DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) |
415 | DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE) | ||
415 | 416 | ||
416 | #define DEF_TALITOS2_DONE(name, ch_done_mask) \ | 417 | #define DEF_TALITOS2_DONE(name, ch_done_mask) \ |
417 | static void talitos2_done_##name(unsigned long data) \ | 418 | static void talitos2_done_##name(unsigned long data) \ |
@@ -422,8 +423,6 @@ static void talitos2_done_##name(unsigned long data) \ | |||
422 | \ | 423 | \ |
423 | if (ch_done_mask & 1) \ | 424 | if (ch_done_mask & 1) \ |
424 | flush_channel(dev, 0, 0, 0); \ | 425 | flush_channel(dev, 0, 0, 0); \ |
425 | if (priv->num_channels == 1) \ | ||
426 | goto out; \ | ||
427 | if (ch_done_mask & (1 << 2)) \ | 426 | if (ch_done_mask & (1 << 2)) \ |
428 | flush_channel(dev, 1, 0, 0); \ | 427 | flush_channel(dev, 1, 0, 0); \ |
429 | if (ch_done_mask & (1 << 4)) \ | 428 | if (ch_done_mask & (1 << 4)) \ |
@@ -431,7 +430,6 @@ static void talitos2_done_##name(unsigned long data) \ | |||
431 | if (ch_done_mask & (1 << 6)) \ | 430 | if (ch_done_mask & (1 << 6)) \ |
432 | flush_channel(dev, 3, 0, 0); \ | 431 | flush_channel(dev, 3, 0, 0); \ |
433 | \ | 432 | \ |
434 | out: \ | ||
435 | /* At this point, all completed channels have been processed */ \ | 433 | /* At this point, all completed channels have been processed */ \ |
436 | /* Unmask done interrupts for channels completed later on. */ \ | 434 | /* Unmask done interrupts for channels completed later on. */ \ |
437 | spin_lock_irqsave(&priv->reg_lock, flags); \ | 435 | spin_lock_irqsave(&priv->reg_lock, flags); \ |
@@ -441,6 +439,7 @@ out: \ | |||
441 | } | 439 | } |
442 | 440 | ||
443 | DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) | 441 | DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) |
442 | DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE) | ||
444 | DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) | 443 | DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) |
445 | DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) | 444 | DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) |
446 | 445 | ||
@@ -464,7 +463,8 @@ static u32 current_desc_hdr(struct device *dev, int ch) | |||
464 | tail = priv->chan[ch].tail; | 463 | tail = priv->chan[ch].tail; |
465 | 464 | ||
466 | iter = tail; | 465 | iter = tail; |
467 | while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) { | 466 | while (priv->chan[ch].fifo[iter].dma_desc != cur_desc && |
467 | priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) { | ||
468 | iter = (iter + 1) & (priv->fifo_len - 1); | 468 | iter = (iter + 1) & (priv->fifo_len - 1); |
469 | if (iter == tail) { | 469 | if (iter == tail) { |
470 | dev_err(dev, "couldn't locate current descriptor\n"); | 470 | dev_err(dev, "couldn't locate current descriptor\n"); |
@@ -472,6 +472,9 @@ static u32 current_desc_hdr(struct device *dev, int ch) | |||
472 | } | 472 | } |
473 | } | 473 | } |
474 | 474 | ||
475 | if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) | ||
476 | return (priv->chan[ch].fifo[iter].desc + 1)->hdr; | ||
477 | |||
475 | return priv->chan[ch].fifo[iter].desc->hdr; | 478 | return priv->chan[ch].fifo[iter].desc->hdr; |
476 | } | 479 | } |
477 | 480 | ||
@@ -825,9 +828,12 @@ struct talitos_ctx { | |||
825 | __be32 desc_hdr_template; | 828 | __be32 desc_hdr_template; |
826 | u8 key[TALITOS_MAX_KEY_SIZE]; | 829 | u8 key[TALITOS_MAX_KEY_SIZE]; |
827 | u8 iv[TALITOS_MAX_IV_LENGTH]; | 830 | u8 iv[TALITOS_MAX_IV_LENGTH]; |
831 | dma_addr_t dma_key; | ||
828 | unsigned int keylen; | 832 | unsigned int keylen; |
829 | unsigned int enckeylen; | 833 | unsigned int enckeylen; |
830 | unsigned int authkeylen; | 834 | unsigned int authkeylen; |
835 | dma_addr_t dma_buf; | ||
836 | dma_addr_t dma_hw_context; | ||
831 | }; | 837 | }; |
832 | 838 | ||
833 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE | 839 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE |
@@ -836,8 +842,8 @@ struct talitos_ctx { | |||
836 | struct talitos_ahash_req_ctx { | 842 | struct talitos_ahash_req_ctx { |
837 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; | 843 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; |
838 | unsigned int hw_context_size; | 844 | unsigned int hw_context_size; |
839 | u8 buf[HASH_MAX_BLOCK_SIZE]; | 845 | u8 buf[2][HASH_MAX_BLOCK_SIZE]; |
840 | u8 bufnext[HASH_MAX_BLOCK_SIZE]; | 846 | int buf_idx; |
841 | unsigned int swinit; | 847 | unsigned int swinit; |
842 | unsigned int first; | 848 | unsigned int first; |
843 | unsigned int last; | 849 | unsigned int last; |
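Replacing buf/bufnext with buf[2] plus buf_idx above turns the partial-block staging area into a ping-pong buffer: while the hardware reads one slot, leftover bytes for the next operation are staged into the other, and completion merely flips the index instead of memcpy()ing (see ahash_done further down). The core of the idiom, as a sketch::

    /* Sketch: two staging buffers toggled by a 0/1 index. */
    u8 *cur  = req_ctx->buf[req_ctx->buf_idx];             /* being hashed */
    u8 *next = req_ctx->buf[(req_ctx->buf_idx + 1) & 1];   /* being staged */

    /* stage to_hash_later bytes into 'next' while 'cur' is in flight */

    /* on completion, promote the staged buffer: */
    req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
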
@@ -861,6 +867,7 @@ static int aead_setkey(struct crypto_aead *authenc, | |||
861 | const u8 *key, unsigned int keylen) | 867 | const u8 *key, unsigned int keylen) |
862 | { | 868 | { |
863 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 869 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
870 | struct device *dev = ctx->dev; | ||
864 | struct crypto_authenc_keys keys; | 871 | struct crypto_authenc_keys keys; |
865 | 872 | ||
866 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 873 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
@@ -869,12 +876,17 @@ static int aead_setkey(struct crypto_aead *authenc, | |||
869 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) | 876 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) |
870 | goto badkey; | 877 | goto badkey; |
871 | 878 | ||
879 | if (ctx->keylen) | ||
880 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); | ||
881 | |||
872 | memcpy(ctx->key, keys.authkey, keys.authkeylen); | 882 | memcpy(ctx->key, keys.authkey, keys.authkeylen); |
873 | memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); | 883 | memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); |
874 | 884 | ||
875 | ctx->keylen = keys.authkeylen + keys.enckeylen; | 885 | ctx->keylen = keys.authkeylen + keys.enckeylen; |
876 | ctx->enckeylen = keys.enckeylen; | 886 | ctx->enckeylen = keys.enckeylen; |
877 | ctx->authkeylen = keys.authkeylen; | 887 | ctx->authkeylen = keys.authkeylen; |
888 | ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, | ||
889 | DMA_TO_DEVICE); | ||
878 | 890 | ||
879 | return 0; | 891 | return 0; |
880 | 892 | ||
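aead_setkey() above now DMA-maps the concatenated authentication+encryption key once per setkey instead of once per request, unmapping the previous mapping on rekey; descriptors then simply reference ctx->dma_key. The lifecycle, condensed into a sketch::

    /* Sketch: one DMA mapping per key, not per request. */
    if (ctx->keylen)           /* rekey: drop the stale mapping first */
            dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

    memcpy(ctx->key, keys.authkey, keys.authkeylen);
    memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
    ctx->keylen = keys.authkeylen + keys.enckeylen;
    ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, DMA_TO_DEVICE);
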
@@ -948,13 +960,13 @@ static void ipsec_esp_unmap(struct device *dev, | |||
948 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 960 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
949 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); | 961 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); |
950 | unsigned int ivsize = crypto_aead_ivsize(aead); | 962 | unsigned int ivsize = crypto_aead_ivsize(aead); |
963 | bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; | ||
964 | struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; | ||
951 | 965 | ||
952 | if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) | 966 | if (is_ipsec_esp) |
953 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], | 967 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], |
954 | DMA_FROM_DEVICE); | 968 | DMA_FROM_DEVICE); |
955 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); | 969 | unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); |
956 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | ||
957 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); | ||
958 | 970 | ||
959 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, | 971 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, |
960 | areq->assoclen); | 972 | areq->assoclen); |
@@ -963,7 +975,7 @@ static void ipsec_esp_unmap(struct device *dev, | |||
963 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 975 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
964 | DMA_BIDIRECTIONAL); | 976 | DMA_BIDIRECTIONAL); |
965 | 977 | ||
966 | if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) { | 978 | if (!is_ipsec_esp) { |
967 | unsigned int dst_nents = edesc->dst_nents ? : 1; | 979 | unsigned int dst_nents = edesc->dst_nents ? : 1; |
968 | 980 | ||
969 | sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, | 981 | sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, |
@@ -983,6 +995,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
983 | struct aead_request *areq = context; | 995 | struct aead_request *areq = context; |
984 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 996 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
985 | unsigned int authsize = crypto_aead_authsize(authenc); | 997 | unsigned int authsize = crypto_aead_authsize(authenc); |
998 | unsigned int ivsize = crypto_aead_ivsize(authenc); | ||
986 | struct talitos_edesc *edesc; | 999 | struct talitos_edesc *edesc; |
987 | struct scatterlist *sg; | 1000 | struct scatterlist *sg; |
988 | void *icvdata; | 1001 | void *icvdata; |
@@ -1003,6 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
1003 | icvdata, authsize); | 1016 | icvdata, authsize); |
1004 | } | 1017 | } |
1005 | 1018 | ||
1019 | dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); | ||
1020 | |||
1006 | kfree(edesc); | 1021 | kfree(edesc); |
1007 | 1022 | ||
1008 | aead_request_complete(areq, err); | 1023 | aead_request_complete(areq, err); |
@@ -1097,8 +1112,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, | |||
1097 | len = cryptlen; | 1112 | len = cryptlen; |
1098 | 1113 | ||
1099 | to_talitos_ptr(link_tbl_ptr + count, | 1114 | to_talitos_ptr(link_tbl_ptr + count, |
1100 | sg_dma_address(sg) + offset, 0); | 1115 | sg_dma_address(sg) + offset, len, 0); |
1101 | to_talitos_ptr_len(link_tbl_ptr + count, len, 0); | ||
1102 | to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); | 1116 | to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); |
1103 | count++; | 1117 | count++; |
1104 | cryptlen -= len; | 1118 | cryptlen -= len; |
@@ -1116,7 +1130,7 @@ next: | |||
1116 | return count; | 1130 | return count; |
1117 | } | 1131 | } |
1118 | 1132 | ||
1119 | int talitos_sg_map(struct device *dev, struct scatterlist *src, | 1133 | static int talitos_sg_map(struct device *dev, struct scatterlist *src, |
1120 | unsigned int len, struct talitos_edesc *edesc, | 1134 | unsigned int len, struct talitos_edesc *edesc, |
1121 | struct talitos_ptr *ptr, | 1135 | struct talitos_ptr *ptr, |
1122 | int sg_count, unsigned int offset, int tbl_off) | 1136 | int sg_count, unsigned int offset, int tbl_off) |
@@ -1124,15 +1138,12 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src, | |||
1124 | struct talitos_private *priv = dev_get_drvdata(dev); | 1138 | struct talitos_private *priv = dev_get_drvdata(dev); |
1125 | bool is_sec1 = has_ftr_sec1(priv); | 1139 | bool is_sec1 = has_ftr_sec1(priv); |
1126 | 1140 | ||
1127 | to_talitos_ptr_len(ptr, len, is_sec1); | ||
1128 | to_talitos_ptr_ext_set(ptr, 0, is_sec1); | ||
1129 | |||
1130 | if (sg_count == 1) { | 1141 | if (sg_count == 1) { |
1131 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1); | 1142 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); |
1132 | return sg_count; | 1143 | return sg_count; |
1133 | } | 1144 | } |
1134 | if (is_sec1) { | 1145 | if (is_sec1) { |
1135 | to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1); | 1146 | to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); |
1136 | return sg_count; | 1147 | return sg_count; |
1137 | } | 1148 | } |
1138 | sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, | 1149 | sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, |
@@ -1143,7 +1154,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src, | |||
1143 | return sg_count; | 1154 | return sg_count; |
1144 | } | 1155 | } |
1145 | to_talitos_ptr(ptr, edesc->dma_link_tbl + | 1156 | to_talitos_ptr(ptr, edesc->dma_link_tbl + |
1146 | tbl_off * sizeof(struct talitos_ptr), is_sec1); | 1157 | tbl_off * sizeof(struct talitos_ptr), len, is_sec1); |
1147 | to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); | 1158 | to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); |
1148 | 1159 | ||
1149 | return sg_count; | 1160 | return sg_count; |
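Now static, talitos_sg_map() keeps its three-way behaviour: a single scatterlist entry is pointed at directly; on SEC1 the descriptor points into the (already flattened) buffer behind dma_link_tbl; otherwise a link table is built and DESC_PTR_LNKTBL_JUMP set. Caller-side, the pattern used throughout this file looks like (a sketch)::

    /* Sketch: mapping request data into descriptor pointer 3. */
    sg_count = edesc->src_nents ?: 1;
    sg_count = dma_map_sg(dev, areq->src, sg_count, DMA_TO_DEVICE);
    sg_count = talitos_sg_map(dev, areq->src, length, edesc,
                              &desc->ptr[3], sg_count, 0, 0);
    if (sg_count > 1)      /* a link table was written: sync before submit */
            sync_needed = true;
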
@@ -1170,10 +1181,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1170 | bool sync_needed = false; | 1181 | bool sync_needed = false; |
1171 | struct talitos_private *priv = dev_get_drvdata(dev); | 1182 | struct talitos_private *priv = dev_get_drvdata(dev); |
1172 | bool is_sec1 = has_ftr_sec1(priv); | 1183 | bool is_sec1 = has_ftr_sec1(priv); |
1184 | bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; | ||
1185 | struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; | ||
1186 | struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; | ||
1173 | 1187 | ||
1174 | /* hmac key */ | 1188 | /* hmac key */ |
1175 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 1189 | to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); |
1176 | DMA_TO_DEVICE); | ||
1177 | 1190 | ||
1178 | sg_count = edesc->src_nents ?: 1; | 1191 | sg_count = edesc->src_nents ?: 1; |
1179 | if (is_sec1 && sg_count > 1) | 1192 | if (is_sec1 && sg_count > 1) |
@@ -1194,25 +1207,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1194 | } | 1207 | } |
1195 | 1208 | ||
1196 | /* cipher iv */ | 1209 | /* cipher iv */ |
1197 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { | 1210 | to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1); |
1198 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1); | ||
1199 | to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1); | ||
1200 | to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1); | ||
1201 | } else { | ||
1202 | to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1); | ||
1203 | to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1); | ||
1204 | to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1); | ||
1205 | } | ||
1206 | 1211 | ||
1207 | /* cipher key */ | 1212 | /* cipher key */ |
1208 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) | 1213 | to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen, |
1209 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, | 1214 | ctx->enckeylen, is_sec1); |
1210 | (char *)&ctx->key + ctx->authkeylen, | ||
1211 | DMA_TO_DEVICE); | ||
1212 | else | ||
1213 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen, | ||
1214 | (char *)&ctx->key + ctx->authkeylen, | ||
1215 | DMA_TO_DEVICE); | ||
1216 | 1215 | ||
1217 | /* | 1216 | /* |
1218 | * cipher in | 1217 | * cipher in |
@@ -1220,24 +1219,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1220 | * extent is bytes of HMAC postpended to ciphertext, | 1219 | * extent is bytes of HMAC postpended to ciphertext, |
1221 | * typically 12 for ipsec | 1220 | * typically 12 for ipsec |
1222 | */ | 1221 | */ |
1223 | to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1); | ||
1224 | to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1); | ||
1225 | |||
1226 | sg_link_tbl_len = cryptlen; | 1222 | sg_link_tbl_len = cryptlen; |
1227 | 1223 | ||
1228 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { | 1224 | if (is_ipsec_esp) { |
1229 | to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1); | 1225 | to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1); |
1230 | 1226 | ||
1231 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) | 1227 | if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV) |
1232 | sg_link_tbl_len += authsize; | 1228 | sg_link_tbl_len += authsize; |
1233 | } | 1229 | } |
1234 | 1230 | ||
1235 | sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, | 1231 | ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc, |
1236 | &desc->ptr[4], sg_count, areq->assoclen, | 1232 | &desc->ptr[4], sg_count, areq->assoclen, tbl_off); |
1237 | tbl_off); | ||
1238 | 1233 | ||
1239 | if (sg_count > 1) { | 1234 | if (ret > 1) { |
1240 | tbl_off += sg_count; | 1235 | tbl_off += ret; |
1241 | sync_needed = true; | 1236 | sync_needed = true; |
1242 | } | 1237 | } |
1243 | 1238 | ||
@@ -1248,47 +1243,59 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1248 | dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); | 1243 | dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); |
1249 | } | 1244 | } |
1250 | 1245 | ||
1251 | sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, | 1246 | ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], |
1252 | &desc->ptr[5], sg_count, areq->assoclen, | 1247 | sg_count, areq->assoclen, tbl_off); |
1253 | tbl_off); | ||
1254 | 1248 | ||
1255 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) | 1249 | if (is_ipsec_esp) |
1256 | to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); | 1250 | to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); |
1257 | 1251 | ||
1258 | if (sg_count > 1) { | 1252 | /* ICV data */ |
1253 | if (ret > 1) { | ||
1254 | tbl_off += ret; | ||
1259 | edesc->icv_ool = true; | 1255 | edesc->icv_ool = true; |
1260 | sync_needed = true; | 1256 | sync_needed = true; |
1261 | 1257 | ||
1262 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { | 1258 | if (is_ipsec_esp) { |
1263 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1259 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
1264 | int offset = (edesc->src_nents + edesc->dst_nents + 2) * | 1260 | int offset = (edesc->src_nents + edesc->dst_nents + 2) * |
1265 | sizeof(struct talitos_ptr) + authsize; | 1261 | sizeof(struct talitos_ptr) + authsize; |
1266 | 1262 | ||
1267 | /* Add an entry to the link table for ICV data */ | 1263 | /* Add an entry to the link table for ICV data */ |
1268 | tbl_ptr += sg_count - 1; | 1264 | to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); |
1269 | to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); | ||
1270 | tbl_ptr++; | ||
1271 | to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, | 1265 | to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, |
1272 | is_sec1); | 1266 | is_sec1); |
1273 | to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); | ||
1274 | 1267 | ||
1275 | /* icv data follows link tables */ | 1268 | /* icv data follows link tables */ |
1276 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, | 1269 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, |
1277 | is_sec1); | 1270 | authsize, is_sec1); |
1271 | } else { | ||
1272 | dma_addr_t addr = edesc->dma_link_tbl; | ||
1273 | |||
1274 | if (is_sec1) | ||
1275 | addr += areq->assoclen + cryptlen; | ||
1276 | else | ||
1277 | addr += sizeof(struct talitos_ptr) * tbl_off; | ||
1278 | |||
1279 | to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1); | ||
1280 | } | ||
1281 | } else if (!is_ipsec_esp) { | ||
1282 | ret = talitos_sg_map(dev, areq->dst, authsize, edesc, | ||
1283 | &desc->ptr[6], sg_count, areq->assoclen + | ||
1284 | cryptlen, | ||
1285 | tbl_off); | ||
1286 | if (ret > 1) { | ||
1287 | tbl_off += ret; | ||
1288 | edesc->icv_ool = true; | ||
1289 | sync_needed = true; | ||
1290 | } else { | ||
1291 | edesc->icv_ool = false; | ||
1278 | } | 1292 | } |
1279 | } else { | 1293 | } else { |
1280 | edesc->icv_ool = false; | 1294 | edesc->icv_ool = false; |
1281 | } | 1295 | } |
1282 | 1296 | ||
1283 | /* ICV data */ | ||
1284 | if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { | ||
1285 | to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); | ||
1286 | to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + | ||
1287 | areq->assoclen + cryptlen, is_sec1); | ||
1288 | } | ||
1289 | |||
1290 | /* iv out */ | 1297 | /* iv out */ |
1291 | if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) | 1298 | if (is_ipsec_esp) |
1292 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, | 1299 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, |
1293 | DMA_FROM_DEVICE); | 1300 | DMA_FROM_DEVICE); |
1294 | 1301 | ||
@@ -1387,22 +1394,31 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1387 | alloc_len += icv_stashing ? authsize : 0; | 1394 | alloc_len += icv_stashing ? authsize : 0; |
1388 | } | 1395 | } |
1389 | 1396 | ||
1397 | /* if it's an ahash, add space for a second desc next to the first one */ | ||
1398 | if (is_sec1 && !dst) | ||
1399 | alloc_len += sizeof(struct talitos_desc); | ||
1400 | |||
1390 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1401 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1391 | if (!edesc) { | 1402 | if (!edesc) { |
1392 | dev_err(dev, "could not allocate edescriptor\n"); | 1403 | dev_err(dev, "could not allocate edescriptor\n"); |
1393 | err = ERR_PTR(-ENOMEM); | 1404 | err = ERR_PTR(-ENOMEM); |
1394 | goto error_sg; | 1405 | goto error_sg; |
1395 | } | 1406 | } |
1407 | memset(&edesc->desc, 0, sizeof(edesc->desc)); | ||
1396 | 1408 | ||
1397 | edesc->src_nents = src_nents; | 1409 | edesc->src_nents = src_nents; |
1398 | edesc->dst_nents = dst_nents; | 1410 | edesc->dst_nents = dst_nents; |
1399 | edesc->iv_dma = iv_dma; | 1411 | edesc->iv_dma = iv_dma; |
1400 | edesc->dma_len = dma_len; | 1412 | edesc->dma_len = dma_len; |
1401 | if (dma_len) | 1413 | if (dma_len) { |
1402 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], | 1414 | void *addr = &edesc->link_tbl[0]; |
1415 | |||
1416 | if (is_sec1 && !dst) | ||
1417 | addr += sizeof(struct talitos_desc); | ||
1418 | edesc->dma_link_tbl = dma_map_single(dev, addr, | ||
1403 | edesc->dma_len, | 1419 | edesc->dma_len, |
1404 | DMA_BIDIRECTIONAL); | 1420 | DMA_BIDIRECTIONAL); |
1405 | 1421 | } | |
1406 | return edesc; | 1422 | return edesc; |
1407 | error_sg: | 1423 | error_sg: |
1408 | if (iv_dma) | 1424 | if (iv_dma) |
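For SEC1 ahash (no destination scatterlist) the allocation above now reserves room for a second descriptor immediately after the first, so the link-table/buffer area and its DMA mapping start one descriptor later. In memory, roughly (a sketch annotated from the hunk above)::

    /*
     * struct talitos_edesc, SEC1 ahash case:
     *   desc                      first descriptor
     *   link_tbl[0] ...           second descriptor lives here (is_sec1 && !dst)
     *   +sizeof(talitos_desc)     link table / flattened data buffer
     */
    void *addr = &edesc->link_tbl[0];

    if (is_sec1 && !dst)
            addr += sizeof(struct talitos_desc);
    edesc->dma_link_tbl = dma_map_single(dev, addr, edesc->dma_len,
                                         DMA_BIDIRECTIONAL);
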
@@ -1468,7 +1484,6 @@ static int aead_decrypt(struct aead_request *req) | |||
1468 | DESC_HDR_MODE1_MDEU_CICV; | 1484 | DESC_HDR_MODE1_MDEU_CICV; |
1469 | 1485 | ||
1470 | /* reset integrity check result bits */ | 1486 | /* reset integrity check result bits */ |
1471 | edesc->desc.hdr_lo = 0; | ||
1472 | 1487 | ||
1473 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); | 1488 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); |
1474 | } | 1489 | } |
@@ -1494,15 +1509,29 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1494 | const u8 *key, unsigned int keylen) | 1509 | const u8 *key, unsigned int keylen) |
1495 | { | 1510 | { |
1496 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1511 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1512 | struct device *dev = ctx->dev; | ||
1513 | u32 tmp[DES_EXPKEY_WORDS]; | ||
1497 | 1514 | ||
1498 | if (keylen > TALITOS_MAX_KEY_SIZE) { | 1515 | if (keylen > TALITOS_MAX_KEY_SIZE) { |
1499 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1516 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
1500 | return -EINVAL; | 1517 | return -EINVAL; |
1501 | } | 1518 | } |
1502 | 1519 | ||
1520 | if (unlikely(crypto_ablkcipher_get_flags(cipher) & | ||
1521 | CRYPTO_TFM_REQ_WEAK_KEY) && | ||
1522 | !des_ekey(tmp, key)) { | ||
1523 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); | ||
1524 | return -EINVAL; | ||
1525 | } | ||
1526 | |||
1527 | if (ctx->keylen) | ||
1528 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); | ||
1529 | |||
1503 | memcpy(&ctx->key, key, keylen); | 1530 | memcpy(&ctx->key, key, keylen); |
1504 | ctx->keylen = keylen; | 1531 | ctx->keylen = keylen; |
1505 | 1532 | ||
1533 | ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); | ||
1534 | |||
1506 | return 0; | 1535 | return 0; |
1507 | } | 1536 | } |
1508 | 1537 | ||
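ablkcipher_setkey() above also starts honouring CRYPTO_TFM_REQ_WEAK_KEY for DES: des_ekey() doubles as a validator, returning 0 for the known weak and semi-weak keys. Condensed (assuming the <crypto/des.h> of this kernel generation)::

    u32 tmp[DES_EXPKEY_WORDS];

    if ((crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY) &&
        !des_ekey(tmp, key)) {             /* 0 => weak key, reject it */
            crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
            return -EINVAL;
    }
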
@@ -1513,7 +1542,6 @@ static void common_nonsnoop_unmap(struct device *dev, | |||
1513 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | 1542 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
1514 | 1543 | ||
1515 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0); | 1544 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0); |
1516 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | ||
1517 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); | 1545 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); |
1518 | 1546 | ||
1519 | if (edesc->dma_len) | 1547 | if (edesc->dma_len) |
@@ -1555,16 +1583,12 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1555 | bool is_sec1 = has_ftr_sec1(priv); | 1583 | bool is_sec1 = has_ftr_sec1(priv); |
1556 | 1584 | ||
1557 | /* first DWORD empty */ | 1585 | /* first DWORD empty */ |
1558 | desc->ptr[0] = zero_entry; | ||
1559 | 1586 | ||
1560 | /* cipher iv */ | 1587 | /* cipher iv */ |
1561 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); | 1588 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1); |
1562 | to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); | ||
1563 | to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1); | ||
1564 | 1589 | ||
1565 | /* cipher key */ | 1590 | /* cipher key */ |
1566 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | 1591 | to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1); |
1567 | (char *)&ctx->key, DMA_TO_DEVICE); | ||
1568 | 1592 | ||
1569 | sg_count = edesc->src_nents ?: 1; | 1593 | sg_count = edesc->src_nents ?: 1; |
1570 | if (is_sec1 && sg_count > 1) | 1594 | if (is_sec1 && sg_count > 1) |
@@ -1599,7 +1623,6 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1599 | DMA_FROM_DEVICE); | 1623 | DMA_FROM_DEVICE); |
1600 | 1624 | ||
1601 | /* last DWORD empty */ | 1625 | /* last DWORD empty */ |
1602 | desc->ptr[6] = zero_entry; | ||
1603 | 1626 | ||
1604 | if (sync_needed) | 1627 | if (sync_needed) |
1605 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1628 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
@@ -1663,26 +1686,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev, | |||
1663 | struct ahash_request *areq) | 1686 | struct ahash_request *areq) |
1664 | { | 1687 | { |
1665 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1688 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1666 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1667 | bool is_sec1 = has_ftr_sec1(priv); | ||
1668 | |||
1669 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
1670 | 1689 | ||
1671 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); | 1690 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); |
1672 | 1691 | ||
1673 | /* When using hashctx-in, must unmap it. */ | ||
1674 | if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) | ||
1675 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], | ||
1676 | DMA_TO_DEVICE); | ||
1677 | |||
1678 | if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) | ||
1679 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], | ||
1680 | DMA_TO_DEVICE); | ||
1681 | |||
1682 | if (edesc->dma_len) | 1692 | if (edesc->dma_len) |
1683 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 1693 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
1684 | DMA_BIDIRECTIONAL); | 1694 | DMA_BIDIRECTIONAL); |
1685 | 1695 | ||
1696 | if (edesc->desc.next_desc) | ||
1697 | dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc), | ||
1698 | TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); | ||
1686 | } | 1699 | } |
1687 | 1700 | ||
1688 | static void ahash_done(struct device *dev, | 1701 | static void ahash_done(struct device *dev, |
@@ -1696,7 +1709,7 @@ static void ahash_done(struct device *dev, | |||
1696 | 1709 | ||
1697 | if (!req_ctx->last && req_ctx->to_hash_later) { | 1710 | if (!req_ctx->last && req_ctx->to_hash_later) { |
1698 | /* Position any partial block for next update/final/finup */ | 1711 | /* Position any partial block for next update/final/finup */ |
1699 | memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); | 1712 | req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1; |
1700 | req_ctx->nbuf = req_ctx->to_hash_later; | 1713 | req_ctx->nbuf = req_ctx->to_hash_later; |
1701 | } | 1714 | } |
1702 | common_nonsnoop_hash_unmap(dev, edesc, areq); | 1715 | common_nonsnoop_hash_unmap(dev, edesc, areq); |
@@ -1710,7 +1723,7 @@ static void ahash_done(struct device *dev, | |||
1710 | * SEC1 doesn't like hashing a zero-sized message, so we do the padding | 1723 | * SEC1 doesn't like hashing a zero-sized message, so we do the padding |
1711 | * ourselves and submit a padded block | 1724 | * ourselves and submit a padded block |
1712 | */ | 1725 | */ |
1713 | void talitos_handle_buggy_hash(struct talitos_ctx *ctx, | 1726 | static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, |
1714 | struct talitos_edesc *edesc, | 1727 | struct talitos_edesc *edesc, |
1715 | struct talitos_ptr *ptr) | 1728 | struct talitos_ptr *ptr) |
1716 | { | 1729 | { |
@@ -1729,6 +1742,7 @@ void talitos_handle_buggy_hash(struct talitos_ctx *ctx, | |||
1729 | 1742 | ||
1730 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | 1743 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, |
1731 | struct ahash_request *areq, unsigned int length, | 1744 | struct ahash_request *areq, unsigned int length, |
1745 | unsigned int offset, | ||
1732 | void (*callback) (struct device *dev, | 1746 | void (*callback) (struct device *dev, |
1733 | struct talitos_desc *desc, | 1747 | struct talitos_desc *desc, |
1734 | void *context, int error)) | 1748 | void *context, int error)) |
@@ -1745,44 +1759,48 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1745 | int sg_count; | 1759 | int sg_count; |
1746 | 1760 | ||
1747 | /* first DWORD empty */ | 1761 | /* first DWORD empty */ |
1748 | desc->ptr[0] = zero_entry; | ||
1749 | 1762 | ||
1750 | /* hash context in */ | 1763 | /* hash context in */ |
1751 | if (!req_ctx->first || req_ctx->swinit) { | 1764 | if (!req_ctx->first || req_ctx->swinit) { |
1752 | map_single_talitos_ptr(dev, &desc->ptr[1], | 1765 | to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context, |
1753 | req_ctx->hw_context_size, | 1766 | req_ctx->hw_context_size, is_sec1); |
1754 | (char *)req_ctx->hw_context, | ||
1755 | DMA_TO_DEVICE); | ||
1756 | req_ctx->swinit = 0; | 1767 | req_ctx->swinit = 0; |
1757 | } else { | ||
1758 | desc->ptr[1] = zero_entry; | ||
1759 | } | 1768 | } |
1760 | /* Indicate next op is not the first. */ | 1769 | /* Indicate next op is not the first. */ |
1761 | req_ctx->first = 0; | 1770 | req_ctx->first = 0; |
1762 | 1771 | ||
1763 | /* HMAC key */ | 1772 | /* HMAC key */ |
1764 | if (ctx->keylen) | 1773 | if (ctx->keylen) |
1765 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | 1774 | to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, |
1766 | (char *)&ctx->key, DMA_TO_DEVICE); | 1775 | is_sec1); |
1767 | else | 1776 | |
1768 | desc->ptr[2] = zero_entry; | 1777 | if (is_sec1 && req_ctx->nbuf) |
1778 | length -= req_ctx->nbuf; | ||
1769 | 1779 | ||
1770 | sg_count = edesc->src_nents ?: 1; | 1780 | sg_count = edesc->src_nents ?: 1; |
1771 | if (is_sec1 && sg_count > 1) | 1781 | if (is_sec1 && sg_count > 1) |
1772 | sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); | 1782 | sg_pcopy_to_buffer(req_ctx->psrc, sg_count, |
1773 | else | 1783 | edesc->buf + sizeof(struct talitos_desc), |
1784 | length, req_ctx->nbuf); | ||
1785 | else if (length) | ||
1774 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, | 1786 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, |
1775 | DMA_TO_DEVICE); | 1787 | DMA_TO_DEVICE); |
1776 | /* | 1788 | /* |
1777 | * data in | 1789 | * data in |
1778 | */ | 1790 | */ |
1779 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | 1791 | if (is_sec1 && req_ctx->nbuf) { |
1780 | &desc->ptr[3], sg_count, 0, 0); | 1792 | dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx * |
1781 | if (sg_count > 1) | 1793 | HASH_MAX_BLOCK_SIZE; |
1782 | sync_needed = true; | 1794 | |
1795 | to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1); | ||
1796 | } else { | ||
1797 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | ||
1798 | &desc->ptr[3], sg_count, offset, 0); | ||
1799 | if (sg_count > 1) | ||
1800 | sync_needed = true; | ||
1801 | } | ||
1783 | 1802 | ||
1784 | /* fifth DWORD empty */ | 1803 | /* fifth DWORD empty */ |
1785 | desc->ptr[4] = zero_entry; | ||
1786 | 1804 | ||
1787 | /* hash/HMAC out -or- hash context out */ | 1805 | /* hash/HMAC out -or- hash context out */ |
1788 | if (req_ctx->last) | 1806 | if (req_ctx->last) |
@@ -1790,16 +1808,44 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1790 | crypto_ahash_digestsize(tfm), | 1808 | crypto_ahash_digestsize(tfm), |
1791 | areq->result, DMA_FROM_DEVICE); | 1809 | areq->result, DMA_FROM_DEVICE); |
1792 | else | 1810 | else |
1793 | map_single_talitos_ptr(dev, &desc->ptr[5], | 1811 | to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, |
1794 | req_ctx->hw_context_size, | 1812 | req_ctx->hw_context_size, is_sec1); |
1795 | req_ctx->hw_context, DMA_FROM_DEVICE); | ||
1796 | 1813 | ||
1797 | /* last DWORD empty */ | 1814 | /* last DWORD empty */ |
1798 | desc->ptr[6] = zero_entry; | ||
1799 | 1815 | ||
1800 | if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) | 1816 | if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) |
1801 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); | 1817 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); |
1802 | 1818 | ||
1819 | if (is_sec1 && req_ctx->nbuf && length) { | ||
1820 | struct talitos_desc *desc2 = desc + 1; | ||
1821 | dma_addr_t next_desc; | ||
1822 | |||
1823 | memset(desc2, 0, sizeof(*desc2)); | ||
1824 | desc2->hdr = desc->hdr; | ||
1825 | desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT; | ||
1826 | desc2->hdr1 = desc2->hdr; | ||
1827 | desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD; | ||
1828 | desc->hdr |= DESC_HDR_MODE0_MDEU_CONT; | ||
1829 | desc->hdr &= ~DESC_HDR_DONE_NOTIFY; | ||
1830 | |||
1831 | to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context, | ||
1832 | req_ctx->hw_context_size, is_sec1); | ||
1833 | |||
1834 | copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); | ||
1835 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | ||
1836 | &desc2->ptr[3], sg_count, offset, 0); | ||
1837 | if (sg_count > 1) | ||
1838 | sync_needed = true; | ||
1839 | copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); | ||
1840 | if (req_ctx->last) | ||
1841 | to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, | ||
1842 | req_ctx->hw_context_size, is_sec1); | ||
1843 | |||
1844 | next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE, | ||
1845 | DMA_BIDIRECTIONAL); | ||
1846 | desc->next_desc = cpu_to_be32(next_desc); | ||
1847 | } | ||
1848 | |||
1803 | if (sync_needed) | 1849 | if (sync_needed) |
1804 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1850 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1805 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1851 | edesc->dma_len, DMA_BIDIRECTIONAL); |
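The block above is how SEC1, which cannot scatter-gather the buffered-bytes case, hashes the staged partial block plus the new scatterlist data in one submission: a second descriptor is built right behind the first and linked via desc->next_desc. The first descriptor runs with CONT set (the context is already live) and its PAD and done notification suppressed, so only the chained descriptor completes the request. Schematically (a sketch of the header juggling, constants from this file)::

    /* Sketch: chain desc -> desc2 for a two-part SEC1 hash. */
    desc2->hdr  = desc->hdr & ~DESC_HDR_MODE0_MDEU_INIT;
    desc2->hdr1 = desc2->hdr;                   /* SEC1 header copy */
    desc->hdr  &= ~(DESC_HDR_MODE0_MDEU_PAD | DESC_HDR_DONE_NOTIFY);
    desc->hdr  |=  DESC_HDR_MODE0_MDEU_CONT;    /* more data follows */

    desc->next_desc = cpu_to_be32(dma_map_single(dev, &desc2->hdr1,
                                                 TALITOS_DESC_SIZE,
                                                 DMA_BIDIRECTIONAL));
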
@@ -1818,6 +1864,11 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | |||
1818 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 1864 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
1819 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | 1865 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
1820 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1866 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1867 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | ||
1868 | bool is_sec1 = has_ftr_sec1(priv); | ||
1869 | |||
1870 | if (is_sec1) | ||
1871 | nbytes -= req_ctx->nbuf; | ||
1821 | 1872 | ||
1822 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, | 1873 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, |
1823 | nbytes, 0, 0, 0, areq->base.flags, false); | 1874 | nbytes, 0, 0, 0, areq->base.flags, false); |
@@ -1826,17 +1877,35 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | |||
1826 | static int ahash_init(struct ahash_request *areq) | 1877 | static int ahash_init(struct ahash_request *areq) |
1827 | { | 1878 | { |
1828 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 1879 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
1880 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1881 | struct device *dev = ctx->dev; | ||
1829 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1882 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1883 | unsigned int size; | ||
1884 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1885 | bool is_sec1 = has_ftr_sec1(priv); | ||
1830 | 1886 | ||
1831 | /* Initialize the context */ | 1887 | /* Initialize the context */ |
1888 | req_ctx->buf_idx = 0; | ||
1832 | req_ctx->nbuf = 0; | 1889 | req_ctx->nbuf = 0; |
1833 | req_ctx->first = 1; /* first indicates h/w must init its context */ | 1890 | req_ctx->first = 1; /* first indicates h/w must init its context */ |
1834 | req_ctx->swinit = 0; /* assume h/w init of context */ | 1891 | req_ctx->swinit = 0; /* assume h/w init of context */ |
1835 | req_ctx->hw_context_size = | 1892 | size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) |
1836 | (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) | ||
1837 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 | 1893 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 |
1838 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; | 1894 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; |
1895 | req_ctx->hw_context_size = size; | ||
1839 | 1896 | ||
1897 | if (ctx->dma_hw_context) | ||
1898 | dma_unmap_single(dev, ctx->dma_hw_context, size, | ||
1899 | DMA_BIDIRECTIONAL); | ||
1900 | ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size, | ||
1901 | DMA_BIDIRECTIONAL); | ||
1902 | if (ctx->dma_buf) | ||
1903 | dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf), | ||
1904 | DMA_TO_DEVICE); | ||
1905 | if (is_sec1) | ||
1906 | ctx->dma_buf = dma_map_single(dev, req_ctx->buf, | ||
1907 | sizeof(req_ctx->buf), | ||
1908 | DMA_TO_DEVICE); | ||
1840 | return 0; | 1909 | return 0; |
1841 | } | 1910 | } |
1842 | 1911 | ||
@@ -1847,6 +1916,9 @@ static int ahash_init(struct ahash_request *areq) | |||
1847 | static int ahash_init_sha224_swinit(struct ahash_request *areq) | 1916 | static int ahash_init_sha224_swinit(struct ahash_request *areq) |
1848 | { | 1917 | { |
1849 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1918 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1919 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1920 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1921 | struct device *dev = ctx->dev; | ||
1850 | 1922 | ||
1851 | ahash_init(areq); | 1923 | ahash_init(areq); |
1852 | req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */ | 1924 | req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */ |
@@ -1864,6 +1936,9 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq) | |||
1864 | req_ctx->hw_context[8] = 0; | 1936 | req_ctx->hw_context[8] = 0; |
1865 | req_ctx->hw_context[9] = 0; | 1937 | req_ctx->hw_context[9] = 0; |
1866 | 1938 | ||
1939 | dma_sync_single_for_device(dev, ctx->dma_hw_context, | ||
1940 | req_ctx->hw_context_size, DMA_TO_DEVICE); | ||
1941 | |||
1867 | return 0; | 1942 | return 0; |
1868 | } | 1943 | } |
1869 | 1944 | ||
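Because the hash context now lives in a long-lived DMA mapping (ctx->dma_hw_context, set up in ahash_init() above), the SHA-224 software-init path must hand its CPU-written initial values back to the device explicitly. The general rule, sketched (SHA224_H0 from <crypto/sha.h> stands in for the full set of initial words)::

    /* Sketch: CPU stores into a streaming DMA buffer need a sync. */
    req_ctx->hw_context[0] = SHA224_H0;   /* ... fill initial state ... */
    dma_sync_single_for_device(dev, ctx->dma_hw_context,
                               req_ctx->hw_context_size, DMA_TO_DEVICE);
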
@@ -1879,6 +1954,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1879 | unsigned int to_hash_later; | 1954 | unsigned int to_hash_later; |
1880 | unsigned int nsg; | 1955 | unsigned int nsg; |
1881 | int nents; | 1956 | int nents; |
1957 | struct device *dev = ctx->dev; | ||
1958 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1959 | bool is_sec1 = has_ftr_sec1(priv); | ||
1960 | int offset = 0; | ||
1961 | u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; | ||
1882 | 1962 | ||
1883 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | 1963 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
1884 | /* Buffer up to one whole block */ | 1964 | /* Buffer up to one whole block */ |
@@ -1888,7 +1968,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1888 | return nents; | 1968 | return nents; |
1889 | } | 1969 | } |
1890 | sg_copy_to_buffer(areq->src, nents, | 1970 | sg_copy_to_buffer(areq->src, nents, |
1891 | req_ctx->buf + req_ctx->nbuf, nbytes); | 1971 | ctx_buf + req_ctx->nbuf, nbytes); |
1892 | req_ctx->nbuf += nbytes; | 1972 | req_ctx->nbuf += nbytes; |
1893 | return 0; | 1973 | return 0; |
1894 | } | 1974 | } |
@@ -1909,13 +1989,27 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1909 | } | 1989 | } |
1910 | 1990 | ||
1911 | /* Chain in any previously buffered data */ | 1991 | /* Chain in any previously buffered data */ |
1912 | if (req_ctx->nbuf) { | 1992 | if (!is_sec1 && req_ctx->nbuf) { |
1913 | nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; | 1993 | nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; |
1914 | sg_init_table(req_ctx->bufsl, nsg); | 1994 | sg_init_table(req_ctx->bufsl, nsg); |
1915 | sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); | 1995 | sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf); |
1916 | if (nsg > 1) | 1996 | if (nsg > 1) |
1917 | sg_chain(req_ctx->bufsl, 2, areq->src); | 1997 | sg_chain(req_ctx->bufsl, 2, areq->src); |
1918 | req_ctx->psrc = req_ctx->bufsl; | 1998 | req_ctx->psrc = req_ctx->bufsl; |
1999 | } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { | ||
2000 | if (nbytes_to_hash > blocksize) | ||
2001 | offset = blocksize - req_ctx->nbuf; | ||
2002 | else | ||
2003 | offset = nbytes_to_hash - req_ctx->nbuf; | ||
2004 | nents = sg_nents_for_len(areq->src, offset); | ||
2005 | if (nents < 0) { | ||
2006 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | ||
2007 | return nents; | ||
2008 | } | ||
2009 | sg_copy_to_buffer(areq->src, nents, | ||
2010 | ctx_buf + req_ctx->nbuf, offset); | ||
2011 | req_ctx->nbuf += offset; | ||
2012 | req_ctx->psrc = areq->src; | ||
1919 | } else | 2013 | } else |
1920 | req_ctx->psrc = areq->src; | 2014 | req_ctx->psrc = areq->src; |
1921 | 2015 | ||
@@ -1926,7 +2020,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1926 | return nents; | 2020 | return nents; |
1927 | } | 2021 | } |
1928 | sg_pcopy_to_buffer(areq->src, nents, | 2022 | sg_pcopy_to_buffer(areq->src, nents, |
1929 | req_ctx->bufnext, | 2023 | req_ctx->buf[(req_ctx->buf_idx + 1) & 1], |
1930 | to_hash_later, | 2024 | to_hash_later, |
1931 | nbytes - to_hash_later); | 2025 | nbytes - to_hash_later); |
1932 | } | 2026 | } |
@@ -1948,6 +2042,13 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1948 | /* request SEC to INIT hash. */ | 2042 | /* request SEC to INIT hash. */ |
1949 | if (req_ctx->first && !req_ctx->swinit) | 2043 | if (req_ctx->first && !req_ctx->swinit) |
1950 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; | 2044 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; |
2045 | if (is_sec1) { | ||
2046 | dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx * | ||
2047 | HASH_MAX_BLOCK_SIZE; | ||
2048 | |||
2049 | dma_sync_single_for_device(dev, dma_buf, | ||
2050 | req_ctx->nbuf, DMA_TO_DEVICE); | ||
2051 | } | ||
1951 | 2052 | ||
1952 | /* When the tfm context has a keylen, it's an HMAC. | 2053 | /* When the tfm context has a keylen, it's an HMAC. |
1953 | * A first or last (ie. not middle) descriptor must request HMAC. | 2054 | * A first or last (ie. not middle) descriptor must request HMAC. |
@@ -1955,7 +2056,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1955 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) | 2056 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) |
1956 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; | 2057 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; |
1957 | 2058 | ||
1958 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, | 2059 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, |
1959 | ahash_done); | 2060 | ahash_done); |
1960 | } | 2061 | } |
1961 | 2062 | ||
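The SEC1-specific branches above exist because that hardware cannot chain a bounce buffer into the source scatterlist the way the SEC2+ path does with sg_chain(). Instead, ahash_process_req() tops the current bounce buffer up to a block boundary by copying from areq->src (tracked via the new offset variable), and spills the bytes to hash later into the second buffer. req_ctx->buf[] is a two-entry double buffer indexed by buf_idx, letting the CPU fill one half while the hardware may still own the other; in outline, using the names from the hunks:

    u8 *cur  = req_ctx->buf[req_ctx->buf_idx];           /* goes to hardware */
    u8 *next = req_ctx->buf[(req_ctx->buf_idx + 1) & 1]; /* takes the tail  */

    sg_copy_to_buffer(areq->src, nents, cur + req_ctx->nbuf, offset);
    sg_pcopy_to_buffer(areq->src, nents, next, to_hash_later,
                       nbytes - to_hash_later);
    /* buf_idx is presumably flipped once the descriptor completes */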
@@ -2001,10 +2102,15 @@ static int ahash_export(struct ahash_request *areq, void *out) | |||
2001 | { | 2102 | { |
2002 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 2103 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
2003 | struct talitos_export_state *export = out; | 2104 | struct talitos_export_state *export = out; |
2105 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
2106 | struct talitos_ctx *ctx = crypto_ahash_ctx(ahash); | ||
2107 | struct device *dev = ctx->dev; | ||
2004 | 2108 | ||
2109 | dma_sync_single_for_cpu(dev, ctx->dma_hw_context, | ||
2110 | req_ctx->hw_context_size, DMA_FROM_DEVICE); | ||
2005 | memcpy(export->hw_context, req_ctx->hw_context, | 2111 | memcpy(export->hw_context, req_ctx->hw_context, |
2006 | req_ctx->hw_context_size); | 2112 | req_ctx->hw_context_size); |
2007 | memcpy(export->buf, req_ctx->buf, req_ctx->nbuf); | 2113 | memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf); |
2008 | export->swinit = req_ctx->swinit; | 2114 | export->swinit = req_ctx->swinit; |
2009 | export->first = req_ctx->first; | 2115 | export->first = req_ctx->first; |
2010 | export->last = req_ctx->last; | 2116 | export->last = req_ctx->last; |
@@ -2019,15 +2125,32 @@ static int ahash_import(struct ahash_request *areq, const void *in) | |||
2019 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 2125 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
2020 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 2126 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
2021 | const struct talitos_export_state *export = in; | 2127 | const struct talitos_export_state *export = in; |
2128 | unsigned int size; | ||
2129 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
2130 | struct device *dev = ctx->dev; | ||
2131 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
2132 | bool is_sec1 = has_ftr_sec1(priv); | ||
2022 | 2133 | ||
2023 | memset(req_ctx, 0, sizeof(*req_ctx)); | 2134 | memset(req_ctx, 0, sizeof(*req_ctx)); |
2024 | req_ctx->hw_context_size = | 2135 | size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) |
2025 | (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) | ||
2026 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 | 2136 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 |
2027 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; | 2137 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; |
2028 | memcpy(req_ctx->hw_context, export->hw_context, | 2138 | req_ctx->hw_context_size = size; |
2029 | req_ctx->hw_context_size); | 2139 | if (ctx->dma_hw_context) |
2030 | memcpy(req_ctx->buf, export->buf, export->nbuf); | 2140 | dma_unmap_single(dev, ctx->dma_hw_context, size, |
2141 | DMA_BIDIRECTIONAL); | ||
2142 | |||
2143 | memcpy(req_ctx->hw_context, export->hw_context, size); | ||
2144 | ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size, | ||
2145 | DMA_BIDIRECTIONAL); | ||
2146 | if (ctx->dma_buf) | ||
2147 | dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf), | ||
2148 | DMA_TO_DEVICE); | ||
2149 | memcpy(req_ctx->buf[0], export->buf, export->nbuf); | ||
2150 | if (is_sec1) | ||
2151 | ctx->dma_buf = dma_map_single(dev, req_ctx->buf, | ||
2152 | sizeof(req_ctx->buf), | ||
2153 | DMA_TO_DEVICE); | ||
2031 | req_ctx->swinit = export->swinit; | 2154 | req_ctx->swinit = export->swinit; |
2032 | req_ctx->first = export->first; | 2155 | req_ctx->first = export->first; |
2033 | req_ctx->last = export->last; | 2156 | req_ctx->last = export->last; |
@@ -2037,22 +2160,6 @@ static int ahash_import(struct ahash_request *areq, const void *in) | |||
2037 | return 0; | 2160 | return 0; |
2038 | } | 2161 | } |
2039 | 2162 | ||
2040 | struct keyhash_result { | ||
2041 | struct completion completion; | ||
2042 | int err; | ||
2043 | }; | ||
2044 | |||
2045 | static void keyhash_complete(struct crypto_async_request *req, int err) | ||
2046 | { | ||
2047 | struct keyhash_result *res = req->data; | ||
2048 | |||
2049 | if (err == -EINPROGRESS) | ||
2050 | return; | ||
2051 | |||
2052 | res->err = err; | ||
2053 | complete(&res->completion); | ||
2054 | } | ||
2055 | |||
2056 | static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, | 2163 | static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, |
2057 | u8 *hash) | 2164 | u8 *hash) |
2058 | { | 2165 | { |
@@ -2060,10 +2167,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, | |||
2060 | 2167 | ||
2061 | struct scatterlist sg[1]; | 2168 | struct scatterlist sg[1]; |
2062 | struct ahash_request *req; | 2169 | struct ahash_request *req; |
2063 | struct keyhash_result hresult; | 2170 | struct crypto_wait wait; |
2064 | int ret; | 2171 | int ret; |
2065 | 2172 | ||
2066 | init_completion(&hresult.completion); | 2173 | crypto_init_wait(&wait); |
2067 | 2174 | ||
2068 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 2175 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
2069 | if (!req) | 2176 | if (!req) |
@@ -2072,25 +2179,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, | |||
2072 | /* Keep tfm keylen == 0 during hash of the long key */ | 2179 | /* Keep tfm keylen == 0 during hash of the long key */ |
2073 | ctx->keylen = 0; | 2180 | ctx->keylen = 0; |
2074 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2181 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2075 | keyhash_complete, &hresult); | 2182 | crypto_req_done, &wait); |
2076 | 2183 | ||
2077 | sg_init_one(&sg[0], key, keylen); | 2184 | sg_init_one(&sg[0], key, keylen); |
2078 | 2185 | ||
2079 | ahash_request_set_crypt(req, sg, hash, keylen); | 2186 | ahash_request_set_crypt(req, sg, hash, keylen); |
2080 | ret = crypto_ahash_digest(req); | 2187 | ret = crypto_wait_req(crypto_ahash_digest(req), &wait); |
2081 | switch (ret) { | 2188 | |
2082 | case 0: | ||
2083 | break; | ||
2084 | case -EINPROGRESS: | ||
2085 | case -EBUSY: | ||
2086 | ret = wait_for_completion_interruptible( | ||
2087 | &hresult.completion); | ||
2088 | if (!ret) | ||
2089 | ret = hresult.err; | ||
2090 | break; | ||
2091 | default: | ||
2092 | break; | ||
2093 | } | ||
2094 | ahash_request_free(req); | 2189 | ahash_request_free(req); |
2095 | 2190 | ||
2096 | return ret; | 2191 | return ret; |
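keyhash() is one of many call sites in this pull that replace hand-rolled completion structs and result switches with the new crypto_wait helpers: crypto_init_wait() prepares the wait object, crypto_req_done() is the generic completion callback, and crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep and returns the operation's final status. The whole pattern, sketched:

    struct crypto_wait wait;
    int ret;

    crypto_init_wait(&wait);
    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               crypto_req_done, &wait);
    ahash_request_set_crypt(req, sg, hash, keylen);
    /* submit; on -EINPROGRESS or -EBUSY, block until crypto_req_done()
     * fires, then return the status the callback recorded */
    ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

One semantic shift to be aware of: crypto_wait_req() waits uninterruptibly, whereas several of the removed open-coded versions used wait_for_completion_interruptible().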
@@ -2100,6 +2195,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
2100 | unsigned int keylen) | 2195 | unsigned int keylen) |
2101 | { | 2196 | { |
2102 | struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 2197 | struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
2198 | struct device *dev = ctx->dev; | ||
2103 | unsigned int blocksize = | 2199 | unsigned int blocksize = |
2104 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | 2200 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); |
2105 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | 2201 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
@@ -2122,7 +2218,11 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
2122 | memcpy(ctx->key, hash, digestsize); | 2218 | memcpy(ctx->key, hash, digestsize); |
2123 | } | 2219 | } |
2124 | 2220 | ||
2221 | if (ctx->keylen) | ||
2222 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); | ||
2223 | |||
2125 | ctx->keylen = keysize; | 2224 | ctx->keylen = keysize; |
2225 | ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE); | ||
2126 | 2226 | ||
2127 | return 0; | 2227 | return 0; |
2128 | } | 2228 | } |
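ahash_setkey() now keeps the (possibly pre-digested) HMAC key DMA-mapped in the transform context as well. ctx->keylen doubles as the "key is currently mapped" flag, which is also why keyhash() zeroes it while hashing an over-long key; the lifecycle compresses to:

    if (ctx->keylen)        /* non-zero keylen implies a live mapping */
            dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
    ctx->keylen = keysize;
    ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);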
@@ -2614,7 +2714,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2614 | .ivsize = AES_BLOCK_SIZE, | 2714 | .ivsize = AES_BLOCK_SIZE, |
2615 | } | 2715 | } |
2616 | }, | 2716 | }, |
2617 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2717 | .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | |
2618 | DESC_HDR_SEL0_AESU | | 2718 | DESC_HDR_SEL0_AESU | |
2619 | DESC_HDR_MODE0_AESU_CTR, | 2719 | DESC_HDR_MODE0_AESU_CTR, |
2620 | }, | 2720 | }, |
@@ -2951,6 +3051,36 @@ static int talitos_cra_init_ahash(struct crypto_tfm *tfm) | |||
2951 | return 0; | 3051 | return 0; |
2952 | } | 3052 | } |
2953 | 3053 | ||
3054 | static void talitos_cra_exit(struct crypto_tfm *tfm) | ||
3055 | { | ||
3056 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
3057 | struct device *dev = ctx->dev; | ||
3058 | |||
3059 | if (ctx->keylen) | ||
3060 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); | ||
3061 | } | ||
3062 | |||
3063 | static void talitos_cra_exit_ahash(struct crypto_tfm *tfm) | ||
3064 | { | ||
3065 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
3066 | struct device *dev = ctx->dev; | ||
3067 | unsigned int size; | ||
3068 | |||
3069 | talitos_cra_exit(tfm); | ||
3070 | |||
3071 | size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <= | ||
3072 | SHA256_DIGEST_SIZE) | ||
3073 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 | ||
3074 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; | ||
3075 | |||
3076 | if (ctx->dma_hw_context) | ||
3077 | dma_unmap_single(dev, ctx->dma_hw_context, size, | ||
3078 | DMA_BIDIRECTIONAL); | ||
3079 | if (ctx->dma_buf) | ||
3080 | dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2, | ||
3081 | DMA_TO_DEVICE); | ||
3082 | } | ||
3083 | |||
2954 | /* | 3084 | /* |
2955 | * given the alg's descriptor header template, determine whether descriptor | 3085 | * given the alg's descriptor header template, determine whether descriptor |
2956 | * type and primary/secondary execution units required match the hw | 3086 | * type and primary/secondary execution units required match the hw |
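The new talitos_cra_exit()/talitos_cra_exit_ahash() callbacks above are the other half of the persistent-mapping scheme: every dma_map_single() done in the setkey/init paths gets exactly one matching dma_unmap_single() at transform teardown, with the ahash variant recomputing the context size from the digest size exactly as the init path did. A reduced sketch of the pairing rule:

    static void my_cra_exit(struct crypto_tfm *tfm)   /* hypothetical name */
    {
            struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

            /* unmap with the same (addr, size, direction) the mapping was
             * created with, or CONFIG_DMA_API_DEBUG will flag a mismatch */
            if (ctx->keylen)
                    dma_unmap_single(ctx->dev, ctx->dma_key, ctx->keylen,
                                     DMA_TO_DEVICE);
    }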
@@ -2989,17 +3119,11 @@ static int talitos_remove(struct platform_device *ofdev) | |||
2989 | break; | 3119 | break; |
2990 | } | 3120 | } |
2991 | list_del(&t_alg->entry); | 3121 | list_del(&t_alg->entry); |
2992 | kfree(t_alg); | ||
2993 | } | 3122 | } |
2994 | 3123 | ||
2995 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) | 3124 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) |
2996 | talitos_unregister_rng(dev); | 3125 | talitos_unregister_rng(dev); |
2997 | 3126 | ||
2998 | for (i = 0; priv->chan && i < priv->num_channels; i++) | ||
2999 | kfree(priv->chan[i].fifo); | ||
3000 | |||
3001 | kfree(priv->chan); | ||
3002 | |||
3003 | for (i = 0; i < 2; i++) | 3127 | for (i = 0; i < 2; i++) |
3004 | if (priv->irq[i]) { | 3128 | if (priv->irq[i]) { |
3005 | free_irq(priv->irq[i], dev); | 3129 | free_irq(priv->irq[i], dev); |
@@ -3010,10 +3134,6 @@ static int talitos_remove(struct platform_device *ofdev) | |||
3010 | if (priv->irq[1]) | 3134 | if (priv->irq[1]) |
3011 | tasklet_kill(&priv->done_task[1]); | 3135 | tasklet_kill(&priv->done_task[1]); |
3012 | 3136 | ||
3013 | iounmap(priv->reg); | ||
3014 | |||
3015 | kfree(priv); | ||
3016 | |||
3017 | return 0; | 3137 | return 0; |
3018 | } | 3138 | } |
3019 | 3139 | ||
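talitos_remove() shrinks because the probe path further down converts every allocation to its managed devm_* counterpart: devm_kzalloc() and devm_ioremap() attach the memory and the MMIO mapping to the device, and the driver core releases them automatically after remove() returns, in reverse order of allocation. Only resources devres is not tracking here, the manually requested IRQs and the tasklets, still need explicit unwinding. The managed shape, roughly:

    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
    if (!priv)
            return -ENOMEM;

    priv->reg = devm_ioremap(dev, res->start, resource_size(res));
    if (!priv->reg)
            return -ENOMEM;
    /* no kfree()/iounmap() in the error paths or in remove() */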
@@ -3025,7 +3145,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3025 | struct talitos_crypto_alg *t_alg; | 3145 | struct talitos_crypto_alg *t_alg; |
3026 | struct crypto_alg *alg; | 3146 | struct crypto_alg *alg; |
3027 | 3147 | ||
3028 | t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); | 3148 | t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg), |
3149 | GFP_KERNEL); | ||
3029 | if (!t_alg) | 3150 | if (!t_alg) |
3030 | return ERR_PTR(-ENOMEM); | 3151 | return ERR_PTR(-ENOMEM); |
3031 | 3152 | ||
@@ -3035,6 +3156,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3035 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 3156 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
3036 | alg = &t_alg->algt.alg.crypto; | 3157 | alg = &t_alg->algt.alg.crypto; |
3037 | alg->cra_init = talitos_cra_init; | 3158 | alg->cra_init = talitos_cra_init; |
3159 | alg->cra_exit = talitos_cra_exit; | ||
3038 | alg->cra_type = &crypto_ablkcipher_type; | 3160 | alg->cra_type = &crypto_ablkcipher_type; |
3039 | alg->cra_ablkcipher.setkey = ablkcipher_setkey; | 3161 | alg->cra_ablkcipher.setkey = ablkcipher_setkey; |
3040 | alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; | 3162 | alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; |
@@ -3043,14 +3165,21 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3043 | break; | 3165 | break; |
3044 | case CRYPTO_ALG_TYPE_AEAD: | 3166 | case CRYPTO_ALG_TYPE_AEAD: |
3045 | alg = &t_alg->algt.alg.aead.base; | 3167 | alg = &t_alg->algt.alg.aead.base; |
3168 | alg->cra_exit = talitos_cra_exit; | ||
3046 | t_alg->algt.alg.aead.init = talitos_cra_init_aead; | 3169 | t_alg->algt.alg.aead.init = talitos_cra_init_aead; |
3047 | t_alg->algt.alg.aead.setkey = aead_setkey; | 3170 | t_alg->algt.alg.aead.setkey = aead_setkey; |
3048 | t_alg->algt.alg.aead.encrypt = aead_encrypt; | 3171 | t_alg->algt.alg.aead.encrypt = aead_encrypt; |
3049 | t_alg->algt.alg.aead.decrypt = aead_decrypt; | 3172 | t_alg->algt.alg.aead.decrypt = aead_decrypt; |
3173 | if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && | ||
3174 | !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { | ||
3175 | devm_kfree(dev, t_alg); | ||
3176 | return ERR_PTR(-ENOTSUPP); | ||
3177 | } | ||
3050 | break; | 3178 | break; |
3051 | case CRYPTO_ALG_TYPE_AHASH: | 3179 | case CRYPTO_ALG_TYPE_AHASH: |
3052 | alg = &t_alg->algt.alg.hash.halg.base; | 3180 | alg = &t_alg->algt.alg.hash.halg.base; |
3053 | alg->cra_init = talitos_cra_init_ahash; | 3181 | alg->cra_init = talitos_cra_init_ahash; |
3182 | alg->cra_exit = talitos_cra_exit_ahash; | ||
3054 | alg->cra_type = &crypto_ahash_type; | 3183 | alg->cra_type = &crypto_ahash_type; |
3055 | t_alg->algt.alg.hash.init = ahash_init; | 3184 | t_alg->algt.alg.hash.init = ahash_init; |
3056 | t_alg->algt.alg.hash.update = ahash_update; | 3185 | t_alg->algt.alg.hash.update = ahash_update; |
@@ -3064,7 +3193,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3064 | 3193 | ||
3065 | if (!(priv->features & TALITOS_FTR_HMAC_OK) && | 3194 | if (!(priv->features & TALITOS_FTR_HMAC_OK) && |
3066 | !strncmp(alg->cra_name, "hmac", 4)) { | 3195 | !strncmp(alg->cra_name, "hmac", 4)) { |
3067 | kfree(t_alg); | 3196 | devm_kfree(dev, t_alg); |
3068 | return ERR_PTR(-ENOTSUPP); | 3197 | return ERR_PTR(-ENOTSUPP); |
3069 | } | 3198 | } |
3070 | if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && | 3199 | if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && |
@@ -3079,7 +3208,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3079 | break; | 3208 | break; |
3080 | default: | 3209 | default: |
3081 | dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); | 3210 | dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); |
3082 | kfree(t_alg); | 3211 | devm_kfree(dev, t_alg); |
3083 | return ERR_PTR(-EINVAL); | 3212 | return ERR_PTR(-EINVAL); |
3084 | } | 3213 | } |
3085 | 3214 | ||
@@ -3156,11 +3285,11 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3156 | struct device *dev = &ofdev->dev; | 3285 | struct device *dev = &ofdev->dev; |
3157 | struct device_node *np = ofdev->dev.of_node; | 3286 | struct device_node *np = ofdev->dev.of_node; |
3158 | struct talitos_private *priv; | 3287 | struct talitos_private *priv; |
3159 | const unsigned int *prop; | ||
3160 | int i, err; | 3288 | int i, err; |
3161 | int stride; | 3289 | int stride; |
3290 | struct resource *res; | ||
3162 | 3291 | ||
3163 | priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); | 3292 | priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL); |
3164 | if (!priv) | 3293 | if (!priv) |
3165 | return -ENOMEM; | 3294 | return -ENOMEM; |
3166 | 3295 | ||
@@ -3172,7 +3301,10 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3172 | 3301 | ||
3173 | spin_lock_init(&priv->reg_lock); | 3302 | spin_lock_init(&priv->reg_lock); |
3174 | 3303 | ||
3175 | priv->reg = of_iomap(np, 0); | 3304 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); |
3305 | if (!res) | ||
3306 | return -ENXIO; | ||
3307 | priv->reg = devm_ioremap(dev, res->start, resource_size(res)); | ||
3176 | if (!priv->reg) { | 3308 | if (!priv->reg) { |
3177 | dev_err(dev, "failed to of_iomap\n"); | 3309 | dev_err(dev, "failed to of_iomap\n"); |
3178 | err = -ENOMEM; | 3310 | err = -ENOMEM; |
@@ -3180,21 +3312,11 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3180 | } | 3312 | } |
3181 | 3313 | ||
3182 | /* get SEC version capabilities from device tree */ | 3314 | /* get SEC version capabilities from device tree */ |
3183 | prop = of_get_property(np, "fsl,num-channels", NULL); | 3315 | of_property_read_u32(np, "fsl,num-channels", &priv->num_channels); |
3184 | if (prop) | 3316 | of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len); |
3185 | priv->num_channels = *prop; | 3317 | of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units); |
3186 | 3318 | of_property_read_u32(np, "fsl,descriptor-types-mask", | |
3187 | prop = of_get_property(np, "fsl,channel-fifo-len", NULL); | 3319 | &priv->desc_types); |
3188 | if (prop) | ||
3189 | priv->chfifo_len = *prop; | ||
3190 | |||
3191 | prop = of_get_property(np, "fsl,exec-units-mask", NULL); | ||
3192 | if (prop) | ||
3193 | priv->exec_units = *prop; | ||
3194 | |||
3195 | prop = of_get_property(np, "fsl,descriptor-types-mask", NULL); | ||
3196 | if (prop) | ||
3197 | priv->desc_types = *prop; | ||
3198 | 3320 | ||
3199 | if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || | 3321 | if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || |
3200 | !priv->exec_units || !priv->desc_types) { | 3322 | !priv->exec_units || !priv->desc_types) { |
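The device-tree parsing swaps of_get_property() plus a raw pointer dereference for of_property_read_u32(), which validates the property's presence and length and handles the conversion from the DT's big-endian encoding. On failure it leaves the output variable untouched, so the zero-initialized priv fields still trip the sanity check above. In miniature:

    u32 num_channels = 0;   /* stays 0 if the property is absent or short */

    of_property_read_u32(np, "fsl,num-channels", &num_channels);
    if (!is_power_of_2(num_channels))
            return -EINVAL;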
@@ -3244,22 +3366,29 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3244 | goto err_out; | 3366 | goto err_out; |
3245 | 3367 | ||
3246 | if (of_device_is_compatible(np, "fsl,sec1.0")) { | 3368 | if (of_device_is_compatible(np, "fsl,sec1.0")) { |
3247 | tasklet_init(&priv->done_task[0], talitos1_done_4ch, | 3369 | if (priv->num_channels == 1) |
3248 | (unsigned long)dev); | 3370 | tasklet_init(&priv->done_task[0], talitos1_done_ch0, |
3249 | } else { | ||
3250 | if (!priv->irq[1]) { | ||
3251 | tasklet_init(&priv->done_task[0], talitos2_done_4ch, | ||
3252 | (unsigned long)dev); | 3371 | (unsigned long)dev); |
3253 | } else { | 3372 | else |
3373 | tasklet_init(&priv->done_task[0], talitos1_done_4ch, | ||
3374 | (unsigned long)dev); | ||
3375 | } else { | ||
3376 | if (priv->irq[1]) { | ||
3254 | tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, | 3377 | tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, |
3255 | (unsigned long)dev); | 3378 | (unsigned long)dev); |
3256 | tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, | 3379 | tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, |
3257 | (unsigned long)dev); | 3380 | (unsigned long)dev); |
3381 | } else if (priv->num_channels == 1) { | ||
3382 | tasklet_init(&priv->done_task[0], talitos2_done_ch0, | ||
3383 | (unsigned long)dev); | ||
3384 | } else { | ||
3385 | tasklet_init(&priv->done_task[0], talitos2_done_4ch, | ||
3386 | (unsigned long)dev); | ||
3258 | } | 3387 | } |
3259 | } | 3388 | } |
3260 | 3389 | ||
3261 | priv->chan = kzalloc(sizeof(struct talitos_channel) * | 3390 | priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) * |
3262 | priv->num_channels, GFP_KERNEL); | 3391 | priv->num_channels, GFP_KERNEL); |
3263 | if (!priv->chan) { | 3392 | if (!priv->chan) { |
3264 | dev_err(dev, "failed to allocate channel management space\n"); | 3393 | dev_err(dev, "failed to allocate channel management space\n"); |
3265 | err = -ENOMEM; | 3394 | err = -ENOMEM; |
@@ -3276,8 +3405,9 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3276 | spin_lock_init(&priv->chan[i].head_lock); | 3405 | spin_lock_init(&priv->chan[i].head_lock); |
3277 | spin_lock_init(&priv->chan[i].tail_lock); | 3406 | spin_lock_init(&priv->chan[i].tail_lock); |
3278 | 3407 | ||
3279 | priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * | 3408 | priv->chan[i].fifo = devm_kzalloc(dev, |
3280 | priv->fifo_len, GFP_KERNEL); | 3409 | sizeof(struct talitos_request) * |
3410 | priv->fifo_len, GFP_KERNEL); | ||
3281 | if (!priv->chan[i].fifo) { | 3411 | if (!priv->chan[i].fifo) { |
3282 | dev_err(dev, "failed to allocate request fifo %d\n", i); | 3412 | dev_err(dev, "failed to allocate request fifo %d\n", i); |
3283 | err = -ENOMEM; | 3413 | err = -ENOMEM; |
@@ -3343,7 +3473,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3343 | if (err) { | 3473 | if (err) { |
3344 | dev_err(dev, "%s alg registration failed\n", | 3474 | dev_err(dev, "%s alg registration failed\n", |
3345 | alg->cra_driver_name); | 3475 | alg->cra_driver_name); |
3346 | kfree(t_alg); | 3476 | devm_kfree(dev, t_alg); |
3347 | } else | 3477 | } else |
3348 | list_add_tail(&t_alg->entry, &priv->alg_list); | 3478 | list_add_tail(&t_alg->entry, &priv->alg_list); |
3349 | } | 3479 | } |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 8dd8f40e2771..a65a63e0d6c1 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -52,8 +52,6 @@ struct talitos_ptr { | |||
52 | __be32 ptr; /* address */ | 52 | __be32 ptr; /* address */ |
53 | }; | 53 | }; |
54 | 54 | ||
55 | static const struct talitos_ptr zero_entry; | ||
56 | |||
57 | /* descriptor */ | 55 | /* descriptor */ |
58 | struct talitos_desc { | 56 | struct talitos_desc { |
59 | __be32 hdr; /* header high bits */ | 57 | __be32 hdr; /* header high bits */ |
@@ -210,9 +208,13 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) | |||
210 | #define TALITOS_ISR 0x1010 /* interrupt status register */ | 208 | #define TALITOS_ISR 0x1010 /* interrupt status register */ |
211 | #define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ | 209 | #define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ |
212 | #define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ | 210 | #define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ |
211 | #define TALITOS1_ISR_CH_0_ERR (2 << 28) /* ch 0 errors mask */ | ||
212 | #define TALITOS1_ISR_CH_0_DONE (1 << 28) /* ch 0 done mask */ | ||
213 | #define TALITOS1_ISR_TEA_ERR 0x00000040 | 213 | #define TALITOS1_ISR_TEA_ERR 0x00000040 |
214 | #define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ | 214 | #define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ |
215 | #define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ | 215 | #define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ |
216 | #define TALITOS2_ISR_CH_0_ERR 2 /* ch 0 errors mask */ | ||
217 | #define TALITOS2_ISR_CH_0_DONE 1 /* ch 0 done mask */ | ||
216 | #define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ | 218 | #define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ |
217 | #define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ | 219 | #define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ |
218 | #define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ | 220 | #define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ |
@@ -234,6 +236,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) | |||
234 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ | 236 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ |
235 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ | 237 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ |
236 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ | 238 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ |
239 | #define TALITOS_CCCR_LO_NE 0x8 /* fetch next descriptor enab. */ | ||
237 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ | 240 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ |
238 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ | 241 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ |
239 | #define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ | 242 | #define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ |
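Single-channel support threads through both files: probe() now installs dedicated channel-0 done tasklets when fsl,num-channels is 1, and talitos.h grows the matching TALITOS1_ISR_CH_0_*/TALITOS2_ISR_CH_0_* masks so those handlers can test and acknowledge only channel 0 instead of scanning four channels. A hypothetical handler body, using the register accessors this driver favors:

    u32 isr = in_be32(priv->reg + TALITOS_ISR);

    if (isr & TALITOS2_ISR_CH_0_DONE)   /* only channel 0 exists */
            /* flush and complete channel 0's done FIFO */;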
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 790f7cadc1ed..765f53e548ab 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1751,7 +1751,6 @@ static void __exit ux500_cryp_mod_fini(void) | |||
1751 | { | 1751 | { |
1752 | pr_debug("[%s] is called!", __func__); | 1752 | pr_debug("[%s] is called!", __func__); |
1753 | platform_driver_unregister(&cryp_driver); | 1753 | platform_driver_unregister(&cryp_driver); |
1754 | return; | ||
1755 | } | 1754 | } |
1756 | 1755 | ||
1757 | module_init(ux500_cryp_mod_init); | 1756 | module_init(ux500_cryp_mod_init); |
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 5035b0dc1e40..abe8c15450df 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c | |||
@@ -319,7 +319,7 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |||
319 | struct virtio_crypto *vcrypto = | 319 | struct virtio_crypto *vcrypto = |
320 | virtcrypto_get_dev_node(node); | 320 | virtcrypto_get_dev_node(node); |
321 | if (!vcrypto) { | 321 | if (!vcrypto) { |
322 | pr_err("virtio_crypto: Could not find a virtio device in the system"); | 322 | pr_err("virtio_crypto: Could not find a virtio device in the system\n"); |
323 | return -ENODEV; | 323 | return -ENODEV; |
324 | } | 324 | } |
325 | 325 | ||
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 17d84217dd76..fc60d00a2e84 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
@@ -27,21 +27,23 @@ | |||
27 | #include <asm/switch_to.h> | 27 | #include <asm/switch_to.h> |
28 | #include <crypto/aes.h> | 28 | #include <crypto/aes.h> |
29 | #include <crypto/scatterwalk.h> | 29 | #include <crypto/scatterwalk.h> |
30 | #include <crypto/skcipher.h> | ||
31 | |||
30 | #include "aesp8-ppc.h" | 32 | #include "aesp8-ppc.h" |
31 | 33 | ||
32 | struct p8_aes_ctr_ctx { | 34 | struct p8_aes_ctr_ctx { |
33 | struct crypto_blkcipher *fallback; | 35 | struct crypto_skcipher *fallback; |
34 | struct aes_key enc_key; | 36 | struct aes_key enc_key; |
35 | }; | 37 | }; |
36 | 38 | ||
37 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) | 39 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) |
38 | { | 40 | { |
39 | const char *alg = crypto_tfm_alg_name(tfm); | 41 | const char *alg = crypto_tfm_alg_name(tfm); |
40 | struct crypto_blkcipher *fallback; | 42 | struct crypto_skcipher *fallback; |
41 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 43 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
42 | 44 | ||
43 | fallback = | 45 | fallback = crypto_alloc_skcipher(alg, 0, |
44 | crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); | 46 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
45 | if (IS_ERR(fallback)) { | 47 | if (IS_ERR(fallback)) { |
46 | printk(KERN_ERR | 48 | printk(KERN_ERR |
47 | "Failed to allocate transformation for '%s': %ld\n", | 49 | "Failed to allocate transformation for '%s': %ld\n", |
@@ -49,11 +51,11 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) | |||
49 | return PTR_ERR(fallback); | 51 | return PTR_ERR(fallback); |
50 | } | 52 | } |
51 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | 53 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", |
52 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); | 54 | crypto_skcipher_driver_name(fallback)); |
53 | 55 | ||
54 | crypto_blkcipher_set_flags( | 56 | crypto_skcipher_set_flags( |
55 | fallback, | 57 | fallback, |
56 | crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); | 58 | crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); |
57 | ctx->fallback = fallback; | 59 | ctx->fallback = fallback; |
58 | 60 | ||
59 | return 0; | 61 | return 0; |
@@ -64,7 +66,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm) | |||
64 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 66 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
65 | 67 | ||
66 | if (ctx->fallback) { | 68 | if (ctx->fallback) { |
67 | crypto_free_blkcipher(ctx->fallback); | 69 | crypto_free_skcipher(ctx->fallback); |
68 | ctx->fallback = NULL; | 70 | ctx->fallback = NULL; |
69 | } | 71 | } |
70 | } | 72 | } |
@@ -83,7 +85,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
83 | pagefault_enable(); | 85 | pagefault_enable(); |
84 | preempt_enable(); | 86 | preempt_enable(); |
85 | 87 | ||
86 | ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); | 88 | ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); |
87 | return ret; | 89 | return ret; |
88 | } | 90 | } |
89 | 91 | ||
@@ -117,15 +119,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | |||
117 | struct blkcipher_walk walk; | 119 | struct blkcipher_walk walk; |
118 | struct p8_aes_ctr_ctx *ctx = | 120 | struct p8_aes_ctr_ctx *ctx = |
119 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | 121 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); |
120 | struct blkcipher_desc fallback_desc = { | ||
121 | .tfm = ctx->fallback, | ||
122 | .info = desc->info, | ||
123 | .flags = desc->flags | ||
124 | }; | ||
125 | 122 | ||
126 | if (in_interrupt()) { | 123 | if (in_interrupt()) { |
127 | ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, | 124 | SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); |
128 | nbytes); | 125 | skcipher_request_set_tfm(req, ctx->fallback); |
126 | skcipher_request_set_callback(req, desc->flags, NULL, NULL); | ||
127 | skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); | ||
128 | ret = crypto_skcipher_encrypt(req); | ||
129 | skcipher_request_zero(req); | ||
129 | } else { | 130 | } else { |
130 | blkcipher_walk_init(&walk, dst, src, nbytes); | 131 | blkcipher_walk_init(&walk, dst, src, nbytes); |
131 | ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | 132 | ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); |
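The vmx CTR fallback migrates from the legacy blkcipher interface to skcipher. Two details carry the weight: passing CRYPTO_ALG_ASYNC in the allocation mask requests a synchronous cipher, which is what makes the on-stack request legal, and skcipher_request_zero() scrubs the request after use so key material does not linger on the stack. The resulting call shape:

    SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);  /* sync tfms only */

    skcipher_request_set_tfm(req, ctx->fallback);
    skcipher_request_set_callback(req, flags, NULL, NULL);
    skcipher_request_set_crypt(req, src, dst, nbytes, iv);
    ret = crypto_skcipher_encrypt(req);
    skcipher_request_zero(req);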
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index fba93237a780..aedb8222836b 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, | |||
92 | return block >> (level * v->hash_per_block_bits); | 92 | return block >> (level * v->hash_per_block_bits); |
93 | } | 93 | } |
94 | 94 | ||
95 | /* | ||
96 | * Callback function for asynchronous crypto API completion notification | ||
97 | */ | ||
98 | static void verity_op_done(struct crypto_async_request *base, int err) | ||
99 | { | ||
100 | struct verity_result *res = (struct verity_result *)base->data; | ||
101 | |||
102 | if (err == -EINPROGRESS) | ||
103 | return; | ||
104 | |||
105 | res->err = err; | ||
106 | complete(&res->completion); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Wait for async crypto API callback | ||
111 | */ | ||
112 | static inline int verity_complete_op(struct verity_result *res, int ret) | ||
113 | { | ||
114 | switch (ret) { | ||
115 | case 0: | ||
116 | break; | ||
117 | |||
118 | case -EINPROGRESS: | ||
119 | case -EBUSY: | ||
120 | ret = wait_for_completion_interruptible(&res->completion); | ||
121 | if (!ret) | ||
122 | ret = res->err; | ||
123 | reinit_completion(&res->completion); | ||
124 | break; | ||
125 | |||
126 | default: | ||
127 | DMERR("verity_wait_hash: crypto op submission failed: %d", ret); | ||
128 | } | ||
129 | |||
130 | if (unlikely(ret < 0)) | ||
131 | DMERR("verity_wait_hash: crypto op failed: %d", ret); | ||
132 | |||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, | 95 | static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, |
137 | const u8 *data, size_t len, | 96 | const u8 *data, size_t len, |
138 | struct verity_result *res) | 97 | struct crypto_wait *wait) |
139 | { | 98 | { |
140 | struct scatterlist sg; | 99 | struct scatterlist sg; |
141 | 100 | ||
142 | sg_init_one(&sg, data, len); | 101 | sg_init_one(&sg, data, len); |
143 | ahash_request_set_crypt(req, &sg, NULL, len); | 102 | ahash_request_set_crypt(req, &sg, NULL, len); |
144 | 103 | ||
145 | return verity_complete_op(res, crypto_ahash_update(req)); | 104 | return crypto_wait_req(crypto_ahash_update(req), wait); |
146 | } | 105 | } |
147 | 106 | ||
148 | /* | 107 | /* |
149 | * Wrapper for crypto_ahash_init, which handles verity salting. | 108 | * Wrapper for crypto_ahash_init, which handles verity salting. |
150 | */ | 109 | */ |
151 | static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, | 110 | static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, |
152 | struct verity_result *res) | 111 | struct crypto_wait *wait) |
153 | { | 112 | { |
154 | int r; | 113 | int r; |
155 | 114 | ||
156 | ahash_request_set_tfm(req, v->tfm); | 115 | ahash_request_set_tfm(req, v->tfm); |
157 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | | 116 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | |
158 | CRYPTO_TFM_REQ_MAY_BACKLOG, | 117 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
159 | verity_op_done, (void *)res); | 118 | crypto_req_done, (void *)wait); |
160 | init_completion(&res->completion); | 119 | crypto_init_wait(wait); |
161 | 120 | ||
162 | r = verity_complete_op(res, crypto_ahash_init(req)); | 121 | r = crypto_wait_req(crypto_ahash_init(req), wait); |
163 | 122 | ||
164 | if (unlikely(r < 0)) { | 123 | if (unlikely(r < 0)) { |
165 | DMERR("crypto_ahash_init failed: %d", r); | 124 | DMERR("crypto_ahash_init failed: %d", r); |
@@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, | |||
167 | } | 126 | } |
168 | 127 | ||
169 | if (likely(v->salt_size && (v->version >= 1))) | 128 | if (likely(v->salt_size && (v->version >= 1))) |
170 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 129 | r = verity_hash_update(v, req, v->salt, v->salt_size, wait); |
171 | 130 | ||
172 | return r; | 131 | return r; |
173 | } | 132 | } |
174 | 133 | ||
175 | static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, | 134 | static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, |
176 | u8 *digest, struct verity_result *res) | 135 | u8 *digest, struct crypto_wait *wait) |
177 | { | 136 | { |
178 | int r; | 137 | int r; |
179 | 138 | ||
180 | if (unlikely(v->salt_size && (!v->version))) { | 139 | if (unlikely(v->salt_size && (!v->version))) { |
181 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 140 | r = verity_hash_update(v, req, v->salt, v->salt_size, wait); |
182 | 141 | ||
183 | if (r < 0) { | 142 | if (r < 0) { |
184 | DMERR("verity_hash_final failed updating salt: %d", r); | 143 | DMERR("verity_hash_final failed updating salt: %d", r); |
@@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, | |||
187 | } | 146 | } |
188 | 147 | ||
189 | ahash_request_set_crypt(req, NULL, digest, 0); | 148 | ahash_request_set_crypt(req, NULL, digest, 0); |
190 | r = verity_complete_op(res, crypto_ahash_final(req)); | 149 | r = crypto_wait_req(crypto_ahash_final(req), wait); |
191 | out: | 150 | out: |
192 | return r; | 151 | return r; |
193 | } | 152 | } |
@@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req, | |||
196 | const u8 *data, size_t len, u8 *digest) | 155 | const u8 *data, size_t len, u8 *digest) |
197 | { | 156 | { |
198 | int r; | 157 | int r; |
199 | struct verity_result res; | 158 | struct crypto_wait wait; |
200 | 159 | ||
201 | r = verity_hash_init(v, req, &res); | 160 | r = verity_hash_init(v, req, &wait); |
202 | if (unlikely(r < 0)) | 161 | if (unlikely(r < 0)) |
203 | goto out; | 162 | goto out; |
204 | 163 | ||
205 | r = verity_hash_update(v, req, data, len, &res); | 164 | r = verity_hash_update(v, req, data, len, &wait); |
206 | if (unlikely(r < 0)) | 165 | if (unlikely(r < 0)) |
207 | goto out; | 166 | goto out; |
208 | 167 | ||
209 | r = verity_hash_final(v, req, digest, &res); | 168 | r = verity_hash_final(v, req, digest, &wait); |
210 | 169 | ||
211 | out: | 170 | out: |
212 | return r; | 171 | return r; |
@@ -389,7 +348,7 @@ out: | |||
389 | * Calculates the digest for the given bio | 348 | * Calculates the digest for the given bio |
390 | */ | 349 | */ |
391 | int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, | 350 | int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, |
392 | struct bvec_iter *iter, struct verity_result *res) | 351 | struct bvec_iter *iter, struct crypto_wait *wait) |
393 | { | 352 | { |
394 | unsigned int todo = 1 << v->data_dev_block_bits; | 353 | unsigned int todo = 1 << v->data_dev_block_bits; |
395 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); | 354 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); |
@@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, | |||
414 | */ | 373 | */ |
415 | sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); | 374 | sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); |
416 | ahash_request_set_crypt(req, &sg, NULL, len); | 375 | ahash_request_set_crypt(req, &sg, NULL, len); |
417 | r = verity_complete_op(res, crypto_ahash_update(req)); | 376 | r = crypto_wait_req(crypto_ahash_update(req), wait); |
418 | 377 | ||
419 | if (unlikely(r < 0)) { | 378 | if (unlikely(r < 0)) { |
420 | DMERR("verity_for_io_block crypto op failed: %d", r); | 379 | DMERR("verity_for_io_block crypto op failed: %d", r); |
@@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io) | |||
482 | struct dm_verity *v = io->v; | 441 | struct dm_verity *v = io->v; |
483 | struct bvec_iter start; | 442 | struct bvec_iter start; |
484 | unsigned b; | 443 | unsigned b; |
485 | struct verity_result res; | 444 | struct crypto_wait wait; |
486 | 445 | ||
487 | for (b = 0; b < io->n_blocks; b++) { | 446 | for (b = 0; b < io->n_blocks; b++) { |
488 | int r; | 447 | int r; |
@@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io) | |||
507 | continue; | 466 | continue; |
508 | } | 467 | } |
509 | 468 | ||
510 | r = verity_hash_init(v, req, &res); | 469 | r = verity_hash_init(v, req, &wait); |
511 | if (unlikely(r < 0)) | 470 | if (unlikely(r < 0)) |
512 | return r; | 471 | return r; |
513 | 472 | ||
514 | start = io->iter; | 473 | start = io->iter; |
515 | r = verity_for_io_block(v, io, &io->iter, &res); | 474 | r = verity_for_io_block(v, io, &io->iter, &wait); |
516 | if (unlikely(r < 0)) | 475 | if (unlikely(r < 0)) |
517 | return r; | 476 | return r; |
518 | 477 | ||
519 | r = verity_hash_final(v, req, verity_io_real_digest(v, io), | 478 | r = verity_hash_final(v, req, verity_io_real_digest(v, io), |
520 | &res); | 479 | &wait); |
521 | if (unlikely(r < 0)) | 480 | if (unlikely(r < 0)) |
522 | return r; | 481 | return r; |
523 | 482 | ||
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index a59e0ada6fd3..b675bc015512 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h | |||
@@ -90,11 +90,6 @@ struct dm_verity_io { | |||
90 | */ | 90 | */ |
91 | }; | 91 | }; |
92 | 92 | ||
93 | struct verity_result { | ||
94 | struct completion completion; | ||
95 | int err; | ||
96 | }; | ||
97 | |||
98 | static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, | 93 | static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, |
99 | struct dm_verity_io *io) | 94 | struct dm_verity_io *io) |
100 | { | 95 | { |
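dm-verity's conversion removes not only the callback but also the per-call reinit_completion() bookkeeping: crypto_wait_req() reinitializes the completion itself after a successful wait, so a single struct crypto_wait can be threaded through init, a run of updates, and final. In outline:

    struct crypto_wait wait;    /* one wait object per verification pass */

    crypto_init_wait(&wait);
    r = crypto_wait_req(crypto_ahash_init(req), &wait);
    r = crypto_wait_req(crypto_ahash_update(req), &wait);  /* reusable */
    r = crypto_wait_req(crypto_ahash_final(req), &wait);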
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 43f52a8fe708..fe5cedd96a24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -1537,7 +1537,13 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | |||
1537 | */ | 1537 | */ |
1538 | static inline int is_ofld_imm(const struct sk_buff *skb) | 1538 | static inline int is_ofld_imm(const struct sk_buff *skb) |
1539 | { | 1539 | { |
1540 | return skb->len <= MAX_IMM_TX_PKT_LEN; | 1540 | struct work_request_hdr *req = (struct work_request_hdr *)skb->data; |
1541 | unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); | ||
1542 | |||
1543 | if (opcode == FW_CRYPTO_LOOKASIDE_WR) | ||
1544 | return skb->len <= SGE_MAX_WR_LEN; | ||
1545 | else | ||
1546 | return skb->len <= MAX_IMM_TX_PKT_LEN; | ||
1541 | } | 1547 | } |
1542 | 1548 | ||
1543 | /** | 1549 | /** |
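The lone cxgb4 change is in service of the chcr crypto driver: FW_CRYPTO_LOOKASIDE_WR work requests carry the entire crypto request inline, so is_ofld_imm() must admit them up to the full SGE_MAX_WR_LEN rather than the usual MAX_IMM_TX_PKT_LEN immediate-data cutoff. The opcode comes out of the common work-request header:

    struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
    unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));

    return skb->len <= (opcode == FW_CRYPTO_LOOKASIDE_WR ?
                        SGE_MAX_WR_LEN : MAX_IMM_TX_PKT_LEN);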
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index bdb963d0ba32..e06740436b92 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -2087,22 +2087,6 @@ init_sg(struct smb_rqst *rqst, u8 *sign) | |||
2087 | return sg; | 2087 | return sg; |
2088 | } | 2088 | } |
2089 | 2089 | ||
2090 | struct cifs_crypt_result { | ||
2091 | int err; | ||
2092 | struct completion completion; | ||
2093 | }; | ||
2094 | |||
2095 | static void cifs_crypt_complete(struct crypto_async_request *req, int err) | ||
2096 | { | ||
2097 | struct cifs_crypt_result *res = req->data; | ||
2098 | |||
2099 | if (err == -EINPROGRESS) | ||
2100 | return; | ||
2101 | |||
2102 | res->err = err; | ||
2103 | complete(&res->completion); | ||
2104 | } | ||
2105 | |||
2106 | static int | 2090 | static int |
2107 | smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) | 2091 | smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) |
2108 | { | 2092 | { |
@@ -2143,12 +2127,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) | |||
2143 | struct aead_request *req; | 2127 | struct aead_request *req; |
2144 | char *iv; | 2128 | char *iv; |
2145 | unsigned int iv_len; | 2129 | unsigned int iv_len; |
2146 | struct cifs_crypt_result result = {0, }; | 2130 | DECLARE_CRYPTO_WAIT(wait); |
2147 | struct crypto_aead *tfm; | 2131 | struct crypto_aead *tfm; |
2148 | unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); | 2132 | unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); |
2149 | 2133 | ||
2150 | init_completion(&result.completion); | ||
2151 | |||
2152 | rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); | 2134 | rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); |
2153 | if (rc) { | 2135 | if (rc) { |
2154 | cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, | 2136 | cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, |
@@ -2208,14 +2190,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) | |||
2208 | aead_request_set_ad(req, assoc_data_len); | 2190 | aead_request_set_ad(req, assoc_data_len); |
2209 | 2191 | ||
2210 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2192 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2211 | cifs_crypt_complete, &result); | 2193 | crypto_req_done, &wait); |
2212 | 2194 | ||
2213 | rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | 2195 | rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) |
2214 | 2196 | : crypto_aead_decrypt(req), &wait); | |
2215 | if (rc == -EINPROGRESS || rc == -EBUSY) { | ||
2216 | wait_for_completion(&result.completion); | ||
2217 | rc = result.err; | ||
2218 | } | ||
2219 | 2197 | ||
2220 | if (!rc && enc) | 2198 | if (!rc && enc) |
2221 | memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); | 2199 | memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); |
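cifs adopts the same helpers via DECLARE_CRYPTO_WAIT(), the on-stack analogue of crypto_init_wait() that declares and initializes the wait object in one statement, and the ternary submit keeps a single wait path for both directions:

    DECLARE_CRYPTO_WAIT(wait);

    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                              crypto_req_done, &wait);
    rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
                             : crypto_aead_decrypt(req), &wait);

The fscrypt hunks that follow make the identical substitution, retiring the subsystem-private DECLARE_FS_COMPLETION_RESULT() machinery in fscrypt_private.h along the way.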
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index c7835df7e7b8..80a3cada53de 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c | |||
@@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) | |||
126 | } | 126 | } |
127 | EXPORT_SYMBOL(fscrypt_get_ctx); | 127 | EXPORT_SYMBOL(fscrypt_get_ctx); |
128 | 128 | ||
129 | /** | ||
130 | * page_crypt_complete() - completion callback for page crypto | ||
131 | * @req: The asynchronous cipher request context | ||
132 | * @res: The result of the cipher operation | ||
133 | */ | ||
134 | static void page_crypt_complete(struct crypto_async_request *req, int res) | ||
135 | { | ||
136 | struct fscrypt_completion_result *ecr = req->data; | ||
137 | |||
138 | if (res == -EINPROGRESS) | ||
139 | return; | ||
140 | ecr->res = res; | ||
141 | complete(&ecr->completion); | ||
142 | } | ||
143 | |||
144 | int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, | 129 | int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, |
145 | u64 lblk_num, struct page *src_page, | 130 | u64 lblk_num, struct page *src_page, |
146 | struct page *dest_page, unsigned int len, | 131 | struct page *dest_page, unsigned int len, |
@@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, | |||
151 | u8 padding[FS_IV_SIZE - sizeof(__le64)]; | 136 | u8 padding[FS_IV_SIZE - sizeof(__le64)]; |
152 | } iv; | 137 | } iv; |
153 | struct skcipher_request *req = NULL; | 138 | struct skcipher_request *req = NULL; |
154 | DECLARE_FS_COMPLETION_RESULT(ecr); | 139 | DECLARE_CRYPTO_WAIT(wait); |
155 | struct scatterlist dst, src; | 140 | struct scatterlist dst, src; |
156 | struct fscrypt_info *ci = inode->i_crypt_info; | 141 | struct fscrypt_info *ci = inode->i_crypt_info; |
157 | struct crypto_skcipher *tfm = ci->ci_ctfm; | 142 | struct crypto_skcipher *tfm = ci->ci_ctfm; |
@@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, | |||
179 | 164 | ||
180 | skcipher_request_set_callback( | 165 | skcipher_request_set_callback( |
181 | req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 166 | req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
182 | page_crypt_complete, &ecr); | 167 | crypto_req_done, &wait); |
183 | 168 | ||
184 | sg_init_table(&dst, 1); | 169 | sg_init_table(&dst, 1); |
185 | sg_set_page(&dst, dest_page, len, offs); | 170 | sg_set_page(&dst, dest_page, len, offs); |
@@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, | |||
187 | sg_set_page(&src, src_page, len, offs); | 172 | sg_set_page(&src, src_page, len, offs); |
188 | skcipher_request_set_crypt(req, &src, &dst, len, &iv); | 173 | skcipher_request_set_crypt(req, &src, &dst, len, &iv); |
189 | if (rw == FS_DECRYPT) | 174 | if (rw == FS_DECRYPT) |
190 | res = crypto_skcipher_decrypt(req); | 175 | res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); |
191 | else | 176 | else |
192 | res = crypto_skcipher_encrypt(req); | 177 | res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
193 | if (res == -EINPROGRESS || res == -EBUSY) { | ||
194 | BUG_ON(req->base.data != &ecr); | ||
195 | wait_for_completion(&ecr.completion); | ||
196 | res = ecr.res; | ||
197 | } | ||
198 | skcipher_request_free(req); | 178 | skcipher_request_free(req); |
199 | if (res) { | 179 | if (res) { |
200 | printk_ratelimited(KERN_ERR | 180 | printk_ratelimited(KERN_ERR |
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 8606da1df0aa..0b94cd2b3222 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c | |||
@@ -16,21 +16,6 @@ | |||
16 | #include "fscrypt_private.h" | 16 | #include "fscrypt_private.h" |
17 | 17 | ||
18 | /** | 18 | /** |
19 | * fname_crypt_complete() - completion callback for filename crypto | ||
20 | * @req: The asynchronous cipher request context | ||
21 | * @res: The result of the cipher operation | ||
22 | */ | ||
23 | static void fname_crypt_complete(struct crypto_async_request *req, int res) | ||
24 | { | ||
25 | struct fscrypt_completion_result *ecr = req->data; | ||
26 | |||
27 | if (res == -EINPROGRESS) | ||
28 | return; | ||
29 | ecr->res = res; | ||
30 | complete(&ecr->completion); | ||
31 | } | ||
32 | |||
33 | /** | ||
34 | * fname_encrypt() - encrypt a filename | 19 | * fname_encrypt() - encrypt a filename |
35 | * | 20 | * |
36 | * The caller must have allocated sufficient memory for the @oname string. | 21 | * The caller must have allocated sufficient memory for the @oname string. |
@@ -41,7 +26,7 @@ static int fname_encrypt(struct inode *inode, | |||
41 | const struct qstr *iname, struct fscrypt_str *oname) | 26 | const struct qstr *iname, struct fscrypt_str *oname) |
42 | { | 27 | { |
43 | struct skcipher_request *req = NULL; | 28 | struct skcipher_request *req = NULL; |
44 | DECLARE_FS_COMPLETION_RESULT(ecr); | 29 | DECLARE_CRYPTO_WAIT(wait); |
45 | struct fscrypt_info *ci = inode->i_crypt_info; | 30 | struct fscrypt_info *ci = inode->i_crypt_info; |
46 | struct crypto_skcipher *tfm = ci->ci_ctfm; | 31 | struct crypto_skcipher *tfm = ci->ci_ctfm; |
47 | int res = 0; | 32 | int res = 0; |
@@ -77,17 +62,12 @@ static int fname_encrypt(struct inode *inode, | |||
77 | } | 62 | } |
78 | skcipher_request_set_callback(req, | 63 | skcipher_request_set_callback(req, |
79 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 64 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
80 | fname_crypt_complete, &ecr); | 65 | crypto_req_done, &wait); |
81 | sg_init_one(&sg, oname->name, cryptlen); | 66 | sg_init_one(&sg, oname->name, cryptlen); |
82 | skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); | 67 | skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); |
83 | 68 | ||
84 | /* Do the encryption */ | 69 | /* Do the encryption */ |
85 | res = crypto_skcipher_encrypt(req); | 70 | res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
86 | if (res == -EINPROGRESS || res == -EBUSY) { | ||
87 | /* Request is being completed asynchronously; wait for it */ | ||
88 | wait_for_completion(&ecr.completion); | ||
89 | res = ecr.res; | ||
90 | } | ||
91 | skcipher_request_free(req); | 71 | skcipher_request_free(req); |
92 | if (res < 0) { | 72 | if (res < 0) { |
93 | printk_ratelimited(KERN_ERR | 73 | printk_ratelimited(KERN_ERR |
@@ -111,7 +91,7 @@ static int fname_decrypt(struct inode *inode, | |||
111 | struct fscrypt_str *oname) | 91 | struct fscrypt_str *oname) |
112 | { | 92 | { |
113 | struct skcipher_request *req = NULL; | 93 | struct skcipher_request *req = NULL; |
114 | DECLARE_FS_COMPLETION_RESULT(ecr); | 94 | DECLARE_CRYPTO_WAIT(wait); |
115 | struct scatterlist src_sg, dst_sg; | 95 | struct scatterlist src_sg, dst_sg; |
116 | struct fscrypt_info *ci = inode->i_crypt_info; | 96 | struct fscrypt_info *ci = inode->i_crypt_info; |
117 | struct crypto_skcipher *tfm = ci->ci_ctfm; | 97 | struct crypto_skcipher *tfm = ci->ci_ctfm; |
@@ -132,7 +112,7 @@ static int fname_decrypt(struct inode *inode, | |||
132 | } | 112 | } |
133 | skcipher_request_set_callback(req, | 113 | skcipher_request_set_callback(req, |
134 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 114 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
135 | fname_crypt_complete, &ecr); | 115 | crypto_req_done, &wait); |
136 | 116 | ||
137 | /* Initialize IV */ | 117 | /* Initialize IV */ |
138 | memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); | 118 | memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); |
@@ -141,11 +121,7 @@ static int fname_decrypt(struct inode *inode, | |||
141 | sg_init_one(&src_sg, iname->name, iname->len); | 121 | sg_init_one(&src_sg, iname->name, iname->len); |
142 | sg_init_one(&dst_sg, oname->name, oname->len); | 122 | sg_init_one(&dst_sg, oname->name, oname->len); |
143 | skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); | 123 | skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); |
144 | res = crypto_skcipher_decrypt(req); | 124 | res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); |
145 | if (res == -EINPROGRESS || res == -EBUSY) { | ||
146 | wait_for_completion(&ecr.completion); | ||
147 | res = ecr.res; | ||
148 | } | ||
149 | skcipher_request_free(req); | 125 | skcipher_request_free(req); |
150 | if (res < 0) { | 126 | if (res < 0) { |
151 | printk_ratelimited(KERN_ERR | 127 | printk_ratelimited(KERN_ERR |
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 092e9dad1414..e54e602b473f 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h | |||
@@ -70,16 +70,6 @@ typedef enum { | |||
70 | #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 | 70 | #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 |
71 | #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 | 71 | #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 |
72 | 72 | ||
73 | struct fscrypt_completion_result { | ||
74 | struct completion completion; | ||
75 | int res; | ||
76 | }; | ||
77 | |||
78 | #define DECLARE_FS_COMPLETION_RESULT(ecr) \ | ||
79 | struct fscrypt_completion_result ecr = { \ | ||
80 | COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } | ||
81 | |||
82 | |||
83 | /* crypto.c */ | 73 | /* crypto.c */ |
84 | extern int fscrypt_initialize(unsigned int cop_flags); | 74 | extern int fscrypt_initialize(unsigned int cop_flags); |
85 | extern struct workqueue_struct *fscrypt_read_workqueue; | 75 | extern struct workqueue_struct *fscrypt_read_workqueue; |
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 577dfaf0367f..4486b3eecc27 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -18,17 +18,6 @@ | |||
18 | 18 | ||
19 | static struct crypto_shash *essiv_hash_tfm; | 19 | static struct crypto_shash *essiv_hash_tfm; |
20 | 20 | ||
21 | static void derive_crypt_complete(struct crypto_async_request *req, int rc) | ||
22 | { | ||
23 | struct fscrypt_completion_result *ecr = req->data; | ||
24 | |||
25 | if (rc == -EINPROGRESS) | ||
26 | return; | ||
27 | |||
28 | ecr->res = rc; | ||
29 | complete(&ecr->completion); | ||
30 | } | ||
31 | |||
32 | /** | 21 | /** |
33 | * derive_key_aes() - Derive a key using AES-128-ECB | 22 | * derive_key_aes() - Derive a key using AES-128-ECB |
34 | * @deriving_key: Encryption key used for derivation. | 23 | * @deriving_key: Encryption key used for derivation. |
@@ -43,7 +32,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], | |||
43 | { | 32 | { |
44 | int res = 0; | 33 | int res = 0; |
45 | struct skcipher_request *req = NULL; | 34 | struct skcipher_request *req = NULL; |
46 | DECLARE_FS_COMPLETION_RESULT(ecr); | 35 | DECLARE_CRYPTO_WAIT(wait); |
47 | struct scatterlist src_sg, dst_sg; | 36 | struct scatterlist src_sg, dst_sg; |
48 | struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); | 37 | struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); |
49 | 38 | ||
@@ -60,7 +49,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], | |||
60 | } | 49 | } |
61 | skcipher_request_set_callback(req, | 50 | skcipher_request_set_callback(req, |
62 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 51 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
63 | derive_crypt_complete, &ecr); | 52 | crypto_req_done, &wait); |
64 | res = crypto_skcipher_setkey(tfm, deriving_key, | 53 | res = crypto_skcipher_setkey(tfm, deriving_key, |
65 | FS_AES_128_ECB_KEY_SIZE); | 54 | FS_AES_128_ECB_KEY_SIZE); |
66 | if (res < 0) | 55 | if (res < 0) |
@@ -70,11 +59,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], | |||
70 | sg_init_one(&dst_sg, derived_raw_key, source_key->size); | 59 | sg_init_one(&dst_sg, derived_raw_key, source_key->size); |
71 | skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, | 60 | skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, |
72 | NULL); | 61 | NULL); |
73 | res = crypto_skcipher_encrypt(req); | 62 | res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
74 | if (res == -EINPROGRESS || res == -EBUSY) { | ||
75 | wait_for_completion(&ecr.completion); | ||
76 | res = ecr.res; | ||
77 | } | ||
78 | out: | 63 | out: |
79 | skcipher_request_free(req); | 64 | skcipher_request_free(req); |
80 | crypto_free_skcipher(tfm); | 65 | crypto_free_skcipher(tfm); |
diff --git a/include/crypto/dh.h b/include/crypto/dh.h index f638998fb6d0..71e1bb24d79f 100644 --- a/include/crypto/dh.h +++ b/include/crypto/dh.h | |||
@@ -53,7 +53,7 @@ struct dh { | |||
53 | * | 53 | * |
54 | * Return: size of the key in bytes | 54 | * Return: size of the key in bytes |
55 | */ | 55 | */ |
56 | int crypto_dh_key_len(const struct dh *params); | 56 | unsigned int crypto_dh_key_len(const struct dh *params); |
57 | 57 | ||
58 | /** | 58 | /** |
59 | * crypto_dh_encode_key() - encode the private key | 59 | * crypto_dh_encode_key() - encode the private key |
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 22f884c97387..8f941102af36 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h | |||
@@ -126,8 +126,7 @@ struct drbg_state { | |||
126 | __u8 *ctr_null_value; /* CTR mode aligned zero buf */ | 126 | __u8 *ctr_null_value; /* CTR mode aligned zero buf */ |
127 | __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ | 127 | __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ |
128 | __u8 *outscratchpad; /* CTR mode aligned outbuf */ | 128 | __u8 *outscratchpad; /* CTR mode aligned outbuf */ |
129 | struct completion ctr_completion; /* CTR mode async handler */ | 129 | struct crypto_wait ctr_wait; /* CTR mode async wait obj */ |
130 | int ctr_async_err; /* CTR mode async error */ | ||
131 | 130 | ||
132 | bool seeded; /* DRBG fully seeded? */ | 131 | bool seeded; /* DRBG fully seeded? */ |
133 | bool pr; /* Prediction resistance enabled? */ | 132 | bool pr; /* Prediction resistance enabled? */ |
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index 1aff2a8a3a68..d696317c43a8 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h | |||
@@ -54,7 +54,7 @@ struct ecdh { | |||
54 | * | 54 | * |
55 | * Return: size of the key in bytes | 55 | * Return: size of the key in bytes |
56 | */ | 56 | */ |
57 | int crypto_ecdh_key_len(const struct ecdh *params); | 57 | unsigned int crypto_ecdh_key_len(const struct ecdh *params); |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * crypto_ecdh_encode_key() - encode the private key | 60 | * crypto_ecdh_encode_key() - encode the private key |
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 000000000000..c50e057ea17e --- /dev/null +++ b/include/crypto/gcm.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _CRYPTO_GCM_H | ||
2 | #define _CRYPTO_GCM_H | ||
3 | |||
4 | #define GCM_AES_IV_SIZE 12 | ||
5 | #define GCM_RFC4106_IV_SIZE 8 | ||
6 | #define GCM_RFC4543_IV_SIZE 8 | ||
7 | |||
8 | #endif | ||
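
The new <crypto/gcm.h> does nothing more than give the standard GCM IV sizes a single home instead of per-driver copies. A minimal sketch of a caller sizing its nonce buffer with the macro; the "gcm(aes)" allocation, key handling, and 16-byte tag below are illustrative stand-ins, not taken from this series:

    /* Illustrative only: size the nonce with the new GCM_AES_IV_SIZE macro. */
    #include <crypto/aead.h>
    #include <crypto/gcm.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static int example_gcm_setup(const u8 *key, unsigned int keylen)
    {
            struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            u8 iv[GCM_AES_IV_SIZE];         /* 96-bit nonce, as GCM expects */
            int err;

            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (!err)
                    err = crypto_aead_setauthsize(tfm, 16);

            memset(iv, 0, sizeof(iv));      /* placeholder; real users need a unique nonce */
            /* ... build an aead_request around iv and submit it ... */

            crypto_free_aead(tfm);
            return err;
    }
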
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index 0977fb18ff68..fa0a63d298dc 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h | |||
@@ -227,7 +227,7 @@ struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); | |||
227 | struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); | 227 | struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); |
228 | void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); | 228 | void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); |
229 | void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); | 229 | void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); |
230 | 230 | void gf128mul_x8_ble(le128 *r, const le128 *x); | |
231 | static inline void gf128mul_free_4k(struct gf128mul_4k *t) | 231 | static inline void gf128mul_free_4k(struct gf128mul_4k *t) |
232 | { | 232 | { |
233 | kzfree(t); | 233 | kzfree(t); |
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b5727bcd2336..0ed31fd80242 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
@@ -75,6 +75,7 @@ struct ahash_request { | |||
75 | * state of the HASH transformation at the beginning. This shall fill in | 75 | * state of the HASH transformation at the beginning. This shall fill in |
76 | * the internal structures used during the entire duration of the whole | 76 | * the internal structures used during the entire duration of the whole |
77 | * transformation. No data processing happens at this point. | 77 | * transformation. No data processing happens at this point. |
78 | * Note: mandatory. | ||
78 | * @update: Push a chunk of data into the driver for transformation. This | 79 | * @update: Push a chunk of data into the driver for transformation. This |
79 | * function actually pushes blocks of data from upper layers into the | 80 | * function actually pushes blocks of data from upper layers into the |
80 | * driver, which then passes those to the hardware as seen fit. This | 81 | * driver, which then passes those to the hardware as seen fit. This |
@@ -84,16 +85,20 @@ struct ahash_request { | |||
84 | * context, as this function may be called in parallel with the same | 85 | * context, as this function may be called in parallel with the same |
85 | * transformation object. Data processing can happen synchronously | 86 | * transformation object. Data processing can happen synchronously |
86 | * [SHASH] or asynchronously [AHASH] at this point. | 87 | * [SHASH] or asynchronously [AHASH] at this point. |
88 | * Note: mandatory. | ||
87 | * @final: Retrieve result from the driver. This function finalizes the | 89 | * @final: Retrieve result from the driver. This function finalizes the |
88 | * transformation and retrieves the resulting hash from the driver and | 90 | * transformation and retrieves the resulting hash from the driver and |
89 | * pushes it back to upper layers. No data processing happens at this | 91 | * pushes it back to upper layers. No data processing happens at this |
90 | * point. | 92 | * point unless hardware requires it to finish the transformation |
93 | * (then the data buffered by the device driver is processed). | ||
94 | * Note: mandatory. | ||
91 | * @finup: Combination of @update and @final. This function is effectively a | 95 | * @finup: Combination of @update and @final. This function is effectively a |
92 | * combination of @update and @final calls issued in sequence. As some | 96 | * combination of @update and @final calls issued in sequence. As some |
93 | * hardware cannot do @update and @final separately, this callback was | 97 | * hardware cannot do @update and @final separately, this callback was |
94 | * added to allow such hardware to be used at least by IPsec. Data | 98 | * added to allow such hardware to be used at least by IPsec. Data |
95 | * processing can happen synchronously [SHASH] or asynchronously [AHASH] | 99 | * processing can happen synchronously [SHASH] or asynchronously [AHASH] |
96 | * at this point. | 100 | * at this point. |
101 | * Note: optional. | ||
97 | * @digest: Combination of @init and @update and @final. This function | 102 | * @digest: Combination of @init and @update and @final. This function |
98 | * effectively behaves as the entire chain of operations, @init, | 103 | * effectively behaves as the entire chain of operations, @init, |
99 | * @update and @final issued in sequence. Just like @finup, this was | 104 | * @update and @final issued in sequence. Just like @finup, this was |
@@ -416,11 +421,10 @@ static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) | |||
416 | * needed to perform the cipher operation | 421 | * needed to perform the cipher operation |
417 | * | 422 | * |
418 | * This function is a "short-hand" for the function calls of | 423 | * This function is a "short-hand" for the function calls of |
419 | * crypto_ahash_update and crypto_shash_final. The parameters have the same | 424 | * crypto_ahash_update and crypto_ahash_final. The parameters have the same |
420 | * meaning as discussed for those separate functions. | 425 | * meaning as discussed for those separate functions. |
421 | * | 426 | * |
422 | * Return: 0 if the message digest creation was successful; < 0 if an error | 427 | * Return: see crypto_ahash_final() |
423 | * occurred | ||
424 | */ | 428 | */ |
425 | int crypto_ahash_finup(struct ahash_request *req); | 429 | int crypto_ahash_finup(struct ahash_request *req); |
426 | 430 | ||
@@ -433,8 +437,11 @@ int crypto_ahash_finup(struct ahash_request *req); | |||
433 | * based on all data added to the cipher handle. The message digest is placed | 437 | * based on all data added to the cipher handle. The message digest is placed |
434 | * into the output buffer registered with the ahash_request handle. | 438 | * into the output buffer registered with the ahash_request handle. |
435 | * | 439 | * |
436 | * Return: 0 if the message digest creation was successful; < 0 if an error | 440 | * Return: |
437 | * occurred | 441 | * 0 if the message digest was successfully calculated; |
442 | -EINPROGRESS if data is fed into hardware (DMA) or queued for later; | ||
443 | -EBUSY if the queue is full and the request should be resubmitted later; | ||
444 | * other < 0 if an error occurred | ||
438 | */ | 445 | */ |
439 | int crypto_ahash_final(struct ahash_request *req); | 446 | int crypto_ahash_final(struct ahash_request *req); |
440 | 447 | ||
@@ -447,8 +454,7 @@ int crypto_ahash_final(struct ahash_request *req); | |||
447 | * crypto_ahash_update and crypto_ahash_final. The parameters have the same | 454 | * crypto_ahash_update and crypto_ahash_final. The parameters have the same |
448 | * meaning as discussed for those separate three functions. | 455 | * meaning as discussed for those separate three functions. |
449 | * | 456 | * |
450 | * Return: 0 if the message digest creation was successful; < 0 if an error | 457 | * Return: see crypto_ahash_final() |
451 | * occurred | ||
452 | */ | 458 | */ |
453 | int crypto_ahash_digest(struct ahash_request *req); | 459 | int crypto_ahash_digest(struct ahash_request *req); |
454 | 460 | ||
@@ -493,8 +499,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) | |||
493 | * handle. Any potentially existing state created by previous operations is | 499 | * handle. Any potentially existing state created by previous operations is |
494 | * discarded. | 500 | * discarded. |
495 | * | 501 | * |
496 | * Return: 0 if the message digest initialization was successful; < 0 if an | 502 | * Return: see crypto_ahash_final() |
497 | * error occurred | ||
498 | */ | 503 | */ |
499 | static inline int crypto_ahash_init(struct ahash_request *req) | 504 | static inline int crypto_ahash_init(struct ahash_request *req) |
500 | { | 505 | { |
@@ -510,8 +515,7 @@ static inline int crypto_ahash_init(struct ahash_request *req) | |||
510 | * is pointed to by the scatter/gather list registered in the &ahash_request | 515 | * is pointed to by the scatter/gather list registered in the &ahash_request |
511 | * handle | 516 | * handle |
512 | * | 517 | * |
513 | * Return: 0 if the message digest update was successful; < 0 if an error | 518 | * Return: see crypto_ahash_final() |
514 | * occurred | ||
515 | */ | 519 | */ |
516 | static inline int crypto_ahash_update(struct ahash_request *req) | 520 | static inline int crypto_ahash_update(struct ahash_request *req) |
517 | { | 521 | { |
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 75ec9c662268..6abf0a3604dc 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h | |||
@@ -40,11 +40,6 @@ struct alg_sock { | |||
40 | void *private; | 40 | void *private; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | struct af_alg_completion { | ||
44 | struct completion completion; | ||
45 | int err; | ||
46 | }; | ||
47 | |||
48 | struct af_alg_control { | 43 | struct af_alg_control { |
49 | struct af_alg_iv *iv; | 44 | struct af_alg_iv *iv; |
50 | int op; | 45 | int op; |
@@ -152,7 +147,7 @@ struct af_alg_ctx { | |||
152 | void *iv; | 147 | void *iv; |
153 | size_t aead_assoclen; | 148 | size_t aead_assoclen; |
154 | 149 | ||
155 | struct af_alg_completion completion; | 150 | struct crypto_wait wait; |
156 | 151 | ||
157 | size_t used; | 152 | size_t used; |
158 | size_t rcvused; | 153 | size_t rcvused; |
@@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); | |||
177 | 172 | ||
178 | int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); | 173 | int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); |
179 | 174 | ||
180 | int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); | ||
181 | void af_alg_complete(struct crypto_async_request *req, int err); | ||
182 | |||
183 | static inline struct alg_sock *alg_sk(struct sock *sk) | 175 | static inline struct alg_sock *alg_sk(struct sock *sk) |
184 | { | 176 | { |
185 | return (struct alg_sock *)sk; | 177 | return (struct alg_sock *)sk; |
186 | } | 178 | } |
187 | 179 | ||
188 | static inline void af_alg_init_completion(struct af_alg_completion *completion) | ||
189 | { | ||
190 | init_completion(&completion->completion); | ||
191 | } | ||
192 | |||
193 | /** | 180 | /** |
194 | * Size of available buffer for sending data from user space to kernel. | 181 | * Size of available buffer for sending data from user space to kernel. |
195 | * | 182 | * |
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h new file mode 100644 index 000000000000..1438942dc773 --- /dev/null +++ b/include/crypto/sm3.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Common values for SM3 algorithm | ||
3 | */ | ||
4 | |||
5 | #ifndef _CRYPTO_SM3_H | ||
6 | #define _CRYPTO_SM3_H | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | |||
10 | #define SM3_DIGEST_SIZE 32 | ||
11 | #define SM3_BLOCK_SIZE 64 | ||
12 | |||
13 | #define SM3_T1 0x79CC4519 | ||
14 | #define SM3_T2 0x7A879D8A | ||
15 | |||
16 | #define SM3_IVA 0x7380166f | ||
17 | #define SM3_IVB 0x4914b2b9 | ||
18 | #define SM3_IVC 0x172442d7 | ||
19 | #define SM3_IVD 0xda8a0600 | ||
20 | #define SM3_IVE 0xa96f30bc | ||
21 | #define SM3_IVF 0x163138aa | ||
22 | #define SM3_IVG 0xe38dee4d | ||
23 | #define SM3_IVH 0xb0fb0e4e | ||
24 | |||
25 | extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; | ||
26 | |||
27 | struct sm3_state { | ||
28 | u32 state[SM3_DIGEST_SIZE / 4]; | ||
29 | u64 count; | ||
30 | u8 buffer[SM3_BLOCK_SIZE]; | ||
31 | }; | ||
32 | |||
33 | struct shash_desc; | ||
34 | |||
35 | extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, | ||
36 | unsigned int len); | ||
37 | |||
38 | extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, | ||
39 | unsigned int len, u8 *hash); | ||
40 | #endif | ||
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h new file mode 100644 index 000000000000..256948e39296 --- /dev/null +++ b/include/crypto/sm3_base.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * sm3_base.h - core logic for SM3 implementations | ||
3 | * | ||
4 | * Copyright (C) 2017 ARM Limited or its affiliates. | ||
5 | * Written by Gilad Ben-Yossef <gilad@benyossef.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <crypto/internal/hash.h> | ||
21 | #include <crypto/sm3.h> | ||
22 | #include <linux/crypto.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <asm/unaligned.h> | ||
25 | |||
26 | typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); | ||
27 | |||
28 | static inline int sm3_base_init(struct shash_desc *desc) | ||
29 | { | ||
30 | struct sm3_state *sctx = shash_desc_ctx(desc); | ||
31 | |||
32 | sctx->state[0] = SM3_IVA; | ||
33 | sctx->state[1] = SM3_IVB; | ||
34 | sctx->state[2] = SM3_IVC; | ||
35 | sctx->state[3] = SM3_IVD; | ||
36 | sctx->state[4] = SM3_IVE; | ||
37 | sctx->state[5] = SM3_IVF; | ||
38 | sctx->state[6] = SM3_IVG; | ||
39 | sctx->state[7] = SM3_IVH; | ||
40 | sctx->count = 0; | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static inline int sm3_base_do_update(struct shash_desc *desc, | ||
46 | const u8 *data, | ||
47 | unsigned int len, | ||
48 | sm3_block_fn *block_fn) | ||
49 | { | ||
50 | struct sm3_state *sctx = shash_desc_ctx(desc); | ||
51 | unsigned int partial = sctx->count % SM3_BLOCK_SIZE; | ||
52 | |||
53 | sctx->count += len; | ||
54 | |||
55 | if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { | ||
56 | int blocks; | ||
57 | |||
58 | if (partial) { | ||
59 | int p = SM3_BLOCK_SIZE - partial; | ||
60 | |||
61 | memcpy(sctx->buffer + partial, data, p); | ||
62 | data += p; | ||
63 | len -= p; | ||
64 | |||
65 | block_fn(sctx, sctx->buffer, 1); | ||
66 | } | ||
67 | |||
68 | blocks = len / SM3_BLOCK_SIZE; | ||
69 | len %= SM3_BLOCK_SIZE; | ||
70 | |||
71 | if (blocks) { | ||
72 | block_fn(sctx, data, blocks); | ||
73 | data += blocks * SM3_BLOCK_SIZE; | ||
74 | } | ||
75 | partial = 0; | ||
76 | } | ||
77 | if (len) | ||
78 | memcpy(sctx->buffer + partial, data, len); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static inline int sm3_base_do_finalize(struct shash_desc *desc, | ||
84 | sm3_block_fn *block_fn) | ||
85 | { | ||
86 | const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); | ||
87 | struct sm3_state *sctx = shash_desc_ctx(desc); | ||
88 | __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); | ||
89 | unsigned int partial = sctx->count % SM3_BLOCK_SIZE; | ||
90 | |||
91 | sctx->buffer[partial++] = 0x80; | ||
92 | if (partial > bit_offset) { | ||
93 | memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); | ||
94 | partial = 0; | ||
95 | |||
96 | block_fn(sctx, sctx->buffer, 1); | ||
97 | } | ||
98 | |||
99 | memset(sctx->buffer + partial, 0x0, bit_offset - partial); | ||
100 | *bits = cpu_to_be64(sctx->count << 3); | ||
101 | block_fn(sctx, sctx->buffer, 1); | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) | ||
107 | { | ||
108 | struct sm3_state *sctx = shash_desc_ctx(desc); | ||
109 | __be32 *digest = (__be32 *)out; | ||
110 | int i; | ||
111 | |||
112 | for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) | ||
113 | put_unaligned_be32(sctx->state[i], digest++); | ||
114 | |||
115 | *sctx = (struct sm3_state){}; | ||
116 | return 0; | ||
117 | } | ||
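
sm3_base.h gives SM3 drivers the same partial-block bookkeeping scaffold the SHA base headers use: the driver supplies only a compression function and forwards its shash callbacks to the helpers. A sketch of that wiring, with sm3_example_block() as an assumed stand-in for a real compression step (the in-tree generic implementation added by this series fills that role properly):

    /*
     * Sketch of how an SM3 driver composes the sm3_base helpers; all
     * sm3_example_* names are hypothetical.
     */
    #include <crypto/sm3_base.h>

    static void sm3_example_block(struct sm3_state *sst, u8 const *src,
                                  int blocks)
    {
            /* A real driver compresses 'blocks' 64-byte blocks from src
             * into sst->state here; left empty in this sketch. */
    }

    static int sm3_example_update(struct shash_desc *desc, const u8 *data,
                                  unsigned int len)
    {
            return sm3_base_do_update(desc, data, len, sm3_example_block);
    }

    static int sm3_example_final(struct shash_desc *desc, u8 *out)
    {
            sm3_base_do_finalize(desc, sm3_example_block);
            return sm3_base_finish(desc, out);
    }
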
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 84da9978e951..78508ca4b108 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/completion.h> | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * Autoloaded crypto modules should only use a prefixed name to avoid allowing | 30 | * Autoloaded crypto modules should only use a prefixed name to avoid allowing |
@@ -468,6 +469,45 @@ struct crypto_alg { | |||
468 | } CRYPTO_MINALIGN_ATTR; | 469 | } CRYPTO_MINALIGN_ATTR; |
469 | 470 | ||
470 | /* | 471 | /* |
472 | * A helper struct for waiting for completion of async crypto ops | ||
473 | */ | ||
474 | struct crypto_wait { | ||
475 | struct completion completion; | ||
476 | int err; | ||
477 | }; | ||
478 | |||
479 | /* | ||
480 | * Macro for declaring a crypto op async wait object on stack | ||
481 | */ | ||
482 | #define DECLARE_CRYPTO_WAIT(_wait) \ | ||
483 | struct crypto_wait _wait = { \ | ||
484 | COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } | ||
485 | |||
486 | /* | ||
487 | * Async ops completion helper functions | ||
488 | */ | ||
489 | void crypto_req_done(struct crypto_async_request *req, int err); | ||
490 | |||
491 | static inline int crypto_wait_req(int err, struct crypto_wait *wait) | ||
492 | { | ||
493 | switch (err) { | ||
494 | case -EINPROGRESS: | ||
495 | case -EBUSY: | ||
496 | wait_for_completion(&wait->completion); | ||
497 | reinit_completion(&wait->completion); | ||
498 | err = wait->err; | ||
499 | break; | ||
500 | } | ||
501 | |||
502 | return err; | ||
503 | } | ||
504 | |||
505 | static inline void crypto_init_wait(struct crypto_wait *wait) | ||
506 | { | ||
507 | init_completion(&wait->completion); | ||
508 | } | ||
509 | |||
510 | /* | ||
471 | * Algorithm registration interface. | 511 | * Algorithm registration interface. |
472 | */ | 512 | */ |
473 | int crypto_register_alg(struct crypto_alg *alg); | 513 | int crypto_register_alg(struct crypto_alg *alg); |
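
Taken together, these helpers replace the per-caller completion structs removed throughout this series. A minimal sketch of the calling convention they enable, mirroring the fs/crypto conversions above; the example_* names and the in-place buffer are assumptions:

    /*
     * Minimal sketch, assuming the caller provides the tfm, an in-place
     * buffer, and a suitably sized IV.
     */
    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static int example_encrypt(struct crypto_skcipher *tfm, u8 *buf,
                               unsigned int len, u8 *iv)
    {
            DECLARE_CRYPTO_WAIT(wait);
            struct skcipher_request *req;
            struct scatterlist sg;
            int err;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                               CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            sg_init_one(&sg, buf, len);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);

            /* Sleeps on -EINPROGRESS/-EBUSY and returns the status that
             * crypto_req_done() recorded. */
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
            return err;
    }
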
diff --git a/include/linux/padata.h b/include/linux/padata.h index 2f9c1f93b1ce..5d13d25da2c8 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h | |||
@@ -37,6 +37,7 @@ | |||
37 | * @list: List entry, to attach to the padata lists. | 37 | * @list: List entry, to attach to the padata lists. |
38 | * @pd: Pointer to the internal control structure. | 38 | * @pd: Pointer to the internal control structure. |
39 | * @cb_cpu: Callback cpu for serialization. | 39 | * @cb_cpu: Callback cpu for serialization. |
40 | * @cpu: Cpu for parallelization. | ||
40 | * @seq_nr: Sequence number of the parallelized data object. | 41 | * @seq_nr: Sequence number of the parallelized data object. |
41 | * @info: Used to pass information from the parallel to the serial function. | 42 | * @info: Used to pass information from the parallel to the serial function. |
42 | * @parallel: Parallel execution function. | 43 | * @parallel: Parallel execution function. |
@@ -46,6 +47,7 @@ struct padata_priv { | |||
46 | struct list_head list; | 47 | struct list_head list; |
47 | struct parallel_data *pd; | 48 | struct parallel_data *pd; |
48 | int cb_cpu; | 49 | int cb_cpu; |
50 | int cpu; | ||
49 | int info; | 51 | int info; |
50 | void (*parallel)(struct padata_priv *padata); | 52 | void (*parallel)(struct padata_priv *padata); |
51 | void (*serial)(struct padata_priv *padata); | 53 | void (*serial)(struct padata_priv *padata); |
@@ -85,6 +87,7 @@ struct padata_serial_queue { | |||
85 | * @swork: work struct for serialization. | 87 | * @swork: work struct for serialization. |
86 | * @pd: Backpointer to the internal control structure. | 88 | * @pd: Backpointer to the internal control structure. |
87 | * @work: work struct for parallelization. | 89 | * @work: work struct for parallelization. |
90 | * @reorder_work: work struct for reordering. | ||
88 | * @num_obj: Number of objects that are processed by this cpu. | 91 | * @num_obj: Number of objects that are processed by this cpu. |
89 | * @cpu_index: Index of the cpu. | 92 | * @cpu_index: Index of the cpu. |
90 | */ | 93 | */ |
@@ -93,6 +96,7 @@ struct padata_parallel_queue { | |||
93 | struct padata_list reorder; | 96 | struct padata_list reorder; |
94 | struct parallel_data *pd; | 97 | struct parallel_data *pd; |
95 | struct work_struct work; | 98 | struct work_struct work; |
99 | struct work_struct reorder_work; | ||
96 | atomic_t num_obj; | 100 | atomic_t num_obj; |
97 | int cpu_index; | 101 | int cpu_index; |
98 | }; | 102 | }; |
diff --git a/kernel/padata.c b/kernel/padata.c index 868f947166d7..f262c9a4e70a 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
@@ -131,6 +131,7 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
131 | padata->cb_cpu = cb_cpu; | 131 | padata->cb_cpu = cb_cpu; |
132 | 132 | ||
133 | target_cpu = padata_cpu_hash(pd); | 133 | target_cpu = padata_cpu_hash(pd); |
134 | padata->cpu = target_cpu; | ||
134 | queue = per_cpu_ptr(pd->pqueue, target_cpu); | 135 | queue = per_cpu_ptr(pd->pqueue, target_cpu); |
135 | 136 | ||
136 | spin_lock(&queue->parallel.lock); | 137 | spin_lock(&queue->parallel.lock); |
@@ -275,11 +276,51 @@ static void padata_reorder(struct parallel_data *pd) | |||
275 | return; | 276 | return; |
276 | } | 277 | } |
277 | 278 | ||
279 | static void invoke_padata_reorder(struct work_struct *work) | ||
280 | { | ||
281 | struct padata_parallel_queue *pqueue; | ||
282 | struct parallel_data *pd; | ||
283 | |||
284 | local_bh_disable(); | ||
285 | pqueue = container_of(work, struct padata_parallel_queue, reorder_work); | ||
286 | pd = pqueue->pd; | ||
287 | padata_reorder(pd); | ||
288 | local_bh_enable(); | ||
289 | } | ||
290 | |||
278 | static void padata_reorder_timer(unsigned long arg) | 291 | static void padata_reorder_timer(unsigned long arg) |
279 | { | 292 | { |
280 | struct parallel_data *pd = (struct parallel_data *)arg; | 293 | struct parallel_data *pd = (struct parallel_data *)arg; |
294 | unsigned int weight; | ||
295 | int target_cpu, cpu; | ||
281 | 296 | ||
282 | padata_reorder(pd); | 297 | cpu = get_cpu(); |
298 | |||
299 | /* We don't lock pd here so as not to interfere with concurrent | ||
300 | * padata_reorder() calls on other CPUs. We just need any CPU out of | ||
301 | * the cpumask.pcpu set. It would be nice if it were the right one, but | ||
302 | * it doesn't matter if we end up on the next one due to an outdated | ||
303 | * pd->processed value. | ||
304 | */ | ||
305 | weight = cpumask_weight(pd->cpumask.pcpu); | ||
306 | target_cpu = padata_index_to_cpu(pd, pd->processed % weight); | ||
307 | |||
308 | /* ensure to call the reorder callback on the correct CPU */ | ||
309 | if (cpu != target_cpu) { | ||
310 | struct padata_parallel_queue *pqueue; | ||
311 | struct padata_instance *pinst; | ||
312 | |||
313 | /* The timer function is serialized wrt itself -- no locking | ||
314 | * needed. | ||
315 | */ | ||
316 | pinst = pd->pinst; | ||
317 | pqueue = per_cpu_ptr(pd->pqueue, target_cpu); | ||
318 | queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work); | ||
319 | } else { | ||
320 | padata_reorder(pd); | ||
321 | } | ||
322 | |||
323 | put_cpu(); | ||
283 | } | 324 | } |
284 | 325 | ||
285 | static void padata_serial_worker(struct work_struct *serial_work) | 326 | static void padata_serial_worker(struct work_struct *serial_work) |
@@ -323,10 +364,21 @@ void padata_do_serial(struct padata_priv *padata) | |||
323 | int cpu; | 364 | int cpu; |
324 | struct padata_parallel_queue *pqueue; | 365 | struct padata_parallel_queue *pqueue; |
325 | struct parallel_data *pd; | 366 | struct parallel_data *pd; |
367 | int reorder_via_wq = 0; | ||
326 | 368 | ||
327 | pd = padata->pd; | 369 | pd = padata->pd; |
328 | 370 | ||
329 | cpu = get_cpu(); | 371 | cpu = get_cpu(); |
372 | |||
373 | /* We need to run on the same CPU padata_do_parallel(.., padata, ..) | ||
374 | * was called on -- or, at least, enqueue the padata object into the | ||
375 | * correct per-cpu queue. | ||
376 | */ | ||
377 | if (cpu != padata->cpu) { | ||
378 | reorder_via_wq = 1; | ||
379 | cpu = padata->cpu; | ||
380 | } | ||
381 | |||
330 | pqueue = per_cpu_ptr(pd->pqueue, cpu); | 382 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
331 | 383 | ||
332 | spin_lock(&pqueue->reorder.lock); | 384 | spin_lock(&pqueue->reorder.lock); |
@@ -336,7 +388,13 @@ void padata_do_serial(struct padata_priv *padata) | |||
336 | 388 | ||
337 | put_cpu(); | 389 | put_cpu(); |
338 | 390 | ||
339 | padata_reorder(pd); | 391 | /* If we're running on the wrong CPU, call padata_reorder() via a |
392 | * kernel worker. | ||
393 | */ | ||
394 | if (reorder_via_wq) | ||
395 | queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work); | ||
396 | else | ||
397 | padata_reorder(pd); | ||
340 | } | 398 | } |
341 | EXPORT_SYMBOL(padata_do_serial); | 399 | EXPORT_SYMBOL(padata_do_serial); |
342 | 400 | ||
@@ -384,8 +442,14 @@ static void padata_init_pqueues(struct parallel_data *pd) | |||
384 | struct padata_parallel_queue *pqueue; | 442 | struct padata_parallel_queue *pqueue; |
385 | 443 | ||
386 | cpu_index = 0; | 444 | cpu_index = 0; |
387 | for_each_cpu(cpu, pd->cpumask.pcpu) { | 445 | for_each_possible_cpu(cpu) { |
388 | pqueue = per_cpu_ptr(pd->pqueue, cpu); | 446 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
447 | |||
448 | if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { | ||
449 | pqueue->cpu_index = -1; | ||
450 | continue; | ||
451 | } | ||
452 | |||
389 | pqueue->pd = pd; | 453 | pqueue->pd = pd; |
390 | pqueue->cpu_index = cpu_index; | 454 | pqueue->cpu_index = cpu_index; |
391 | cpu_index++; | 455 | cpu_index++; |
@@ -393,6 +457,7 @@ static void padata_init_pqueues(struct parallel_data *pd) | |||
393 | __padata_list_init(&pqueue->reorder); | 457 | __padata_list_init(&pqueue->reorder); |
394 | __padata_list_init(&pqueue->parallel); | 458 | __padata_list_init(&pqueue->parallel); |
395 | INIT_WORK(&pqueue->work, padata_parallel_worker); | 459 | INIT_WORK(&pqueue->work, padata_parallel_worker); |
460 | INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder); | ||
396 | atomic_set(&pqueue->num_obj, 0); | 461 | atomic_set(&pqueue->num_obj, 0); |
397 | } | 462 | } |
398 | } | 463 | } |
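
These hunks pin reordering work to the CPU each object was parallelized on, which is why padata_priv now records its cpu. For context, a padata user hands objects in via padata_do_parallel() and gets them back in submission order through its serial callback. A minimal sketch under the 4.15 API, with all example_* names hypothetical and pinst assumed to have been set up elsewhere (e.g. padata_alloc_possible() plus padata_start()):

    #include <linux/padata.h>

    struct example_job {
            struct padata_priv padata;
            /* job payload ... */
    };

    static void example_parallel(struct padata_priv *padata)
    {
            /* CPU-heavy work runs here, possibly concurrently on many CPUs */
            padata_do_serial(padata);       /* hand back for in-order completion */
    }

    static void example_serial(struct padata_priv *padata)
    {
            /* invoked in original submission order on the callback CPU */
    }

    static int example_submit(struct padata_instance *pinst,
                              struct example_job *job, int cb_cpu)
    {
            job->padata.parallel = example_parallel;
            job->padata.serial   = example_serial;
            /* cb_cpu must be in the instance's serial cpumask */
            return padata_do_parallel(pinst, &job->padata, cb_cpu);
    }
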
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index e24388a863a7..468fb7cd1221 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c | |||
@@ -26,6 +26,7 @@ | |||
26 | * however I decided to publish this code under the plain GPL. | 26 | * however I decided to publish this code under the plain GPL. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/sched.h> | ||
29 | #include <linux/string.h> | 30 | #include <linux/string.h> |
30 | #include "mpi-internal.h" | 31 | #include "mpi-internal.h" |
31 | #include "longlong.h" | 32 | #include "longlong.h" |
@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) | |||
256 | } | 257 | } |
257 | e <<= 1; | 258 | e <<= 1; |
258 | c--; | 259 | c--; |
260 | cond_resched(); | ||
259 | } | 261 | } |
260 | 262 | ||
261 | i--; | 263 | i--; |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 37db44f60718..4dd95cdd8070 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -240,7 +240,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
240 | if (err == -EINPROGRESS) | 240 | if (err == -EINPROGRESS) |
241 | goto out; | 241 | goto out; |
242 | 242 | ||
243 | if (err == -EBUSY) | 243 | if (err == -ENOSPC) |
244 | err = NET_XMIT_DROP; | 244 | err = NET_XMIT_DROP; |
245 | goto out_free; | 245 | goto out_free; |
246 | } | 246 | } |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index b00e4a43b4dc..d57aa64fa7c7 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -432,7 +432,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * | |||
432 | case -EINPROGRESS: | 432 | case -EINPROGRESS: |
433 | goto error; | 433 | goto error; |
434 | 434 | ||
435 | case -EBUSY: | 435 | case -ENOSPC: |
436 | err = NET_XMIT_DROP; | 436 | err = NET_XMIT_DROP; |
437 | break; | 437 | break; |
438 | 438 | ||
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 7802b72196f3..3bd9d806b506 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -443,7 +443,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
443 | if (err == -EINPROGRESS) | 443 | if (err == -EINPROGRESS) |
444 | goto out; | 444 | goto out; |
445 | 445 | ||
446 | if (err == -EBUSY) | 446 | if (err == -ENOSPC) |
447 | err = NET_XMIT_DROP; | 447 | err = NET_XMIT_DROP; |
448 | goto out_free; | 448 | goto out_free; |
449 | } | 449 | } |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 89910e2c10f4..c04d995df37c 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -396,7 +396,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info | |||
396 | case -EINPROGRESS: | 396 | case -EINPROGRESS: |
397 | goto error; | 397 | goto error; |
398 | 398 | ||
399 | case -EBUSY: | 399 | case -ENOSPC: |
400 | err = NET_XMIT_DROP; | 400 | err = NET_XMIT_DROP; |
401 | break; | 401 | break; |
402 | 402 | ||
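
These network hunks all make the same substitution: a full crypto queue is now reported as -ENOSPC, leaving -EBUSY to mean the request was accepted onto the backlog and its completion callback will still run (which is why crypto_wait_req() above sleeps on it). A sketch of the status handling that falls out of the split on an async transmit path; the helper is hypothetical, and NET_XMIT_DROP mirrors the hunks above:

    #include <linux/errno.h>
    #include <linux/netdevice.h>

    static int example_xmit_status(int err)
    {
            switch (err) {
            case 0:                 /* completed synchronously */
                    return 0;
            case -EINPROGRESS:      /* callback will deliver the result */
            case -EBUSY:            /* backlogged; the callback still runs */
                    return 0;
            case -ENOSPC:           /* queue full; request was not taken */
                    return NET_XMIT_DROP;
            default:
                    return err;     /* hard error */
            }
    }
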
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index a856d8c9c9f3..9057b163c378 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c | |||
@@ -27,11 +27,6 @@ | |||
27 | 27 | ||
28 | #include "ima.h" | 28 | #include "ima.h" |
29 | 29 | ||
30 | struct ahash_completion { | ||
31 | struct completion completion; | ||
32 | int err; | ||
33 | }; | ||
34 | |||
35 | /* minimum file size for ahash use */ | 30 | /* minimum file size for ahash use */ |
36 | static unsigned long ima_ahash_minsize; | 31 | static unsigned long ima_ahash_minsize; |
37 | module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); | 32 | module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); |
@@ -196,30 +191,13 @@ static void ima_free_atfm(struct crypto_ahash *tfm) | |||
196 | crypto_free_ahash(tfm); | 191 | crypto_free_ahash(tfm); |
197 | } | 192 | } |
198 | 193 | ||
199 | static void ahash_complete(struct crypto_async_request *req, int err) | 194 | static inline int ahash_wait(int err, struct crypto_wait *wait) |
200 | { | 195 | { |
201 | struct ahash_completion *res = req->data; | ||
202 | 196 | ||
203 | if (err == -EINPROGRESS) | 197 | err = crypto_wait_req(err, wait); |
204 | return; | ||
205 | res->err = err; | ||
206 | complete(&res->completion); | ||
207 | } | ||
208 | 198 | ||
209 | static int ahash_wait(int err, struct ahash_completion *res) | 199 | if (err) |
210 | { | ||
211 | switch (err) { | ||
212 | case 0: | ||
213 | break; | ||
214 | case -EINPROGRESS: | ||
215 | case -EBUSY: | ||
216 | wait_for_completion(&res->completion); | ||
217 | reinit_completion(&res->completion); | ||
218 | err = res->err; | ||
219 | /* fall through */ | ||
220 | default: | ||
221 | pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); | 200 | pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); |
222 | } | ||
223 | 201 | ||
224 | return err; | 202 | return err; |
225 | } | 203 | } |
@@ -233,7 +211,7 @@ static int ima_calc_file_hash_atfm(struct file *file, | |||
233 | int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; | 211 | int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; |
234 | struct ahash_request *req; | 212 | struct ahash_request *req; |
235 | struct scatterlist sg[1]; | 213 | struct scatterlist sg[1]; |
236 | struct ahash_completion res; | 214 | struct crypto_wait wait; |
237 | size_t rbuf_size[2]; | 215 | size_t rbuf_size[2]; |
238 | 216 | ||
239 | hash->length = crypto_ahash_digestsize(tfm); | 217 | hash->length = crypto_ahash_digestsize(tfm); |
@@ -242,12 +220,12 @@ static int ima_calc_file_hash_atfm(struct file *file, | |||
242 | if (!req) | 220 | if (!req) |
243 | return -ENOMEM; | 221 | return -ENOMEM; |
244 | 222 | ||
245 | init_completion(&res.completion); | 223 | crypto_init_wait(&wait); |
246 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | | 224 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
247 | CRYPTO_TFM_REQ_MAY_SLEEP, | 225 | CRYPTO_TFM_REQ_MAY_SLEEP, |
248 | ahash_complete, &res); | 226 | crypto_req_done, &wait); |
249 | 227 | ||
250 | rc = ahash_wait(crypto_ahash_init(req), &res); | 228 | rc = ahash_wait(crypto_ahash_init(req), &wait); |
251 | if (rc) | 229 | if (rc) |
252 | goto out1; | 230 | goto out1; |
253 | 231 | ||
@@ -288,7 +266,7 @@ static int ima_calc_file_hash_atfm(struct file *file, | |||
288 | * read/request, wait for the completion of the | 266 | * read/request, wait for the completion of the |
289 | * previous ahash_update() request. | 267 | * previous ahash_update() request. |
290 | */ | 268 | */ |
291 | rc = ahash_wait(ahash_rc, &res); | 269 | rc = ahash_wait(ahash_rc, &wait); |
292 | if (rc) | 270 | if (rc) |
293 | goto out3; | 271 | goto out3; |
294 | } | 272 | } |
@@ -304,7 +282,7 @@ static int ima_calc_file_hash_atfm(struct file *file, | |||
304 | * read/request, wait for the completion of the | 282 | * read/request, wait for the completion of the |
305 | * previous ahash_update() request. | 283 | * previous ahash_update() request. |
306 | */ | 284 | */ |
307 | rc = ahash_wait(ahash_rc, &res); | 285 | rc = ahash_wait(ahash_rc, &wait); |
308 | if (rc) | 286 | if (rc) |
309 | goto out3; | 287 | goto out3; |
310 | } | 288 | } |
@@ -318,7 +296,7 @@ static int ima_calc_file_hash_atfm(struct file *file, | |||
318 | active = !active; /* swap buffers, if we use two */ | 296 | active = !active; /* swap buffers, if we use two */ |
319 | } | 297 | } |
320 | /* wait for the last update request to complete */ | 298 | /* wait for the last update request to complete */ |
321 | rc = ahash_wait(ahash_rc, &res); | 299 | rc = ahash_wait(ahash_rc, &wait); |
322 | out3: | 300 | out3: |
323 | if (read) | 301 | if (read) |
324 | file->f_mode &= ~FMODE_READ; | 302 | file->f_mode &= ~FMODE_READ; |
@@ -327,7 +305,7 @@ out3: | |||
327 | out2: | 305 | out2: |
328 | if (!rc) { | 306 | if (!rc) { |
329 | ahash_request_set_crypt(req, NULL, hash->digest, 0); | 307 | ahash_request_set_crypt(req, NULL, hash->digest, 0); |
330 | rc = ahash_wait(crypto_ahash_final(req), &res); | 308 | rc = ahash_wait(crypto_ahash_final(req), &wait); |
331 | } | 309 | } |
332 | out1: | 310 | out1: |
333 | ahash_request_free(req); | 311 | ahash_request_free(req); |
@@ -537,7 +515,7 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, | |||
537 | { | 515 | { |
538 | struct ahash_request *req; | 516 | struct ahash_request *req; |
539 | struct scatterlist sg; | 517 | struct scatterlist sg; |
540 | struct ahash_completion res; | 518 | struct crypto_wait wait; |
541 | int rc, ahash_rc = 0; | 519 | int rc, ahash_rc = 0; |
542 | 520 | ||
543 | hash->length = crypto_ahash_digestsize(tfm); | 521 | hash->length = crypto_ahash_digestsize(tfm); |
@@ -546,12 +524,12 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, | |||
546 | if (!req) | 524 | if (!req) |
547 | return -ENOMEM; | 525 | return -ENOMEM; |
548 | 526 | ||
549 | init_completion(&res.completion); | 527 | crypto_init_wait(&wait); |
550 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | | 528 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
551 | CRYPTO_TFM_REQ_MAY_SLEEP, | 529 | CRYPTO_TFM_REQ_MAY_SLEEP, |
552 | ahash_complete, &res); | 530 | crypto_req_done, &wait); |
553 | 531 | ||
554 | rc = ahash_wait(crypto_ahash_init(req), &res); | 532 | rc = ahash_wait(crypto_ahash_init(req), &wait); |
555 | if (rc) | 533 | if (rc) |
556 | goto out; | 534 | goto out; |
557 | 535 | ||
@@ -561,10 +539,10 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len, | |||
561 | ahash_rc = crypto_ahash_update(req); | 539 | ahash_rc = crypto_ahash_update(req); |
562 | 540 | ||
563 | /* wait for the update request to complete */ | 541 | /* wait for the update request to complete */ |
564 | rc = ahash_wait(ahash_rc, &res); | 542 | rc = ahash_wait(ahash_rc, &wait); |
565 | if (!rc) { | 543 | if (!rc) { |
566 | ahash_request_set_crypt(req, NULL, hash->digest, 0); | 544 | ahash_request_set_crypt(req, NULL, hash->digest, 0); |
567 | rc = ahash_wait(crypto_ahash_final(req), &res); | 545 | rc = ahash_wait(crypto_ahash_final(req), &wait); |
568 | } | 546 | } |
569 | out: | 547 | out: |
570 | ahash_request_free(req); | 548 | ahash_request_free(req); |
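
The same wait object also serves the ahash pipelining IMA relies on: updates may be left in flight and their status folded in later through crypto_wait_req(), which is all that remains of the old ahash_wait() machinery. A compact one-shot digest sketch under that convention; the tfm, buffer, and example_* name are illustrative:

    #include <crypto/hash.h>
    #include <linux/scatterlist.h>

    static int example_ahash_digest(struct crypto_ahash *tfm, const void *buf,
                                    unsigned int len, u8 *out)
    {
            DECLARE_CRYPTO_WAIT(wait);
            struct ahash_request *req;
            struct scatterlist sg;
            int rc;

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                            CRYPTO_TFM_REQ_MAY_SLEEP,
                                       crypto_req_done, &wait);
            sg_init_one(&sg, buf, len);
            ahash_request_set_crypt(req, &sg, out, len);

            /* init + update + final in one async round trip */
            rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

            ahash_request_free(req);
            return rc;
    }
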