author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-08 06:44:48 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-08 06:44:48 -0400
commit    | 87d7bcee4f5973a593b0d50134364cfe5652ff33 (patch)
tree      | 677125896b64de2f5acfa204955442f58e74cfa9 /crypto
parent    | 0223f9aaef94a09ffc0b6abcba732e62a483b88c (diff)
parent    | be34c4ef693ff5c10f55606dbd656ddf0b4a8340 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- add multibuffer infrastructure (single_task_running scheduler helper,
  OKed by Peter on lkml).
- add SHA1 multibuffer implementation for AVX2.
- reenable "by8" AVX CTR optimisation after fixing counter overflow.
- add APM X-Gene SoC RNG support.
- SHA256/SHA512 now handle unaligned input correctly.
- set lz4 decompressed length correctly.
- fix algif socket buffer allocation failure for 64K page machines.
- misc fixes
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (47 commits)
crypto: sha - Handle unaligned input data in generic sha256 and sha512.
Revert "crypto: aesni - disable "by8" AVX CTR optimization"
crypto: aesni - remove unused defines in "by8" variant
crypto: aesni - fix counter overflow handling in "by8" variant
hwrng: printk replacement
crypto: qat - Removed unneeded partial state
crypto: qat - Fix typo in name of tasklet_struct
crypto: caam - Dynamic allocation of addresses for various memory blocks in CAAM.
crypto: mcryptd - Fix typos in CRYPTO_MCRYPTD description
crypto: algif - avoid excessive use of socket buffer in skcipher
arm64: dts: add random number generator dts node to APM X-Gene platform.
Documentation: rng: Add X-Gene SoC RNG driver documentation
hwrng: xgene - add support for APM X-Gene SoC RNG support
crypto: mv_cesa - Add missing #define
crypto: testmgr - add test for lz4 and lz4hc
crypto: lz4,lz4hc - fix decompression
crypto: qat - Use pci_enable_msix_exact() instead of pci_enable_msix()
crypto: drbg - fix maximum value checks on 32 bit systems
crypto: drbg - fix sparse warning for cpu_to_be[32|64]
crypto: sha-mb - sha1_mb_alg_state can be static
...
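
The multi-buffer SHA-1 added in this pull registers as an ordinary asynchronous hash, so existing consumers keep using the standard ahash API and simply see asynchronous completion more often. Below is a minimal sketch of such a caller, not taken from this series: the sketch_* names are made up, error handling is trimmed, and "sha1" simply resolves to whichever registered implementation has the highest priority (possibly the multi-buffer one on capable x86_64 machines).

```c
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct sketch_wait {
	struct completion done;
	int err;
};

static void sketch_complete(struct crypto_async_request *req, int err)
{
	struct sketch_wait *wait = req->data;

	if (err == -EINPROGRESS)	/* request left the backlog, keep waiting */
		return;
	wait->err = err;
	complete(&wait->done);
}

static int sketch_sha1_digest(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct sketch_wait wait;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.done);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sketch_complete, &wait);
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* multi-buffer implementations typically return -EINPROGRESS here
	 * and complete the request later from the mcryptd work/flush path */
	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
```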
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig          |  30
-rw-r--r-- | crypto/Makefile         |   1
-rw-r--r-- | crypto/ahash.c          |  12
-rw-r--r-- | crypto/algif_skcipher.c |   2
-rw-r--r-- | crypto/drbg.c           | 130
-rw-r--r-- | crypto/lz4.c            |   2
-rw-r--r-- | crypto/lz4hc.c          |   2
-rw-r--r-- | crypto/mcryptd.c        | 705
-rw-r--r-- | crypto/sha256_generic.c |   3
-rw-r--r-- | crypto/sha512_generic.c |   3
-rw-r--r-- | crypto/testmgr.c        | 966
-rw-r--r-- | crypto/testmgr.h        |  66
12 files changed, 1339 insertions, 583 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 77daef031db5..87bbc9c1e681 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -158,6 +158,20 @@ config CRYPTO_CRYPTD | |||
158 | converts an arbitrary synchronous software crypto algorithm | 158 | converts an arbitrary synchronous software crypto algorithm |
159 | into an asynchronous algorithm that executes in a kernel thread. | 159 | into an asynchronous algorithm that executes in a kernel thread. |
160 | 160 | ||
161 | config CRYPTO_MCRYPTD | ||
162 | tristate "Software async multi-buffer crypto daemon" | ||
163 | select CRYPTO_BLKCIPHER | ||
164 | select CRYPTO_HASH | ||
165 | select CRYPTO_MANAGER | ||
166 | select CRYPTO_WORKQUEUE | ||
167 | help | ||
168 | This is a generic software asynchronous crypto daemon that | ||
169 | provides the kernel thread to assist multi-buffer crypto | ||
170 | algorithms for submitting jobs and flushing jobs in multi-buffer | ||
171 | crypto algorithms. Multi-buffer crypto algorithms are executed | ||
172 | in the context of this kernel thread and drivers can post | ||
173 | their crypto request asynchronously to be processed by this daemon. | ||
174 | |||
161 | config CRYPTO_AUTHENC | 175 | config CRYPTO_AUTHENC |
162 | tristate "Authenc support" | 176 | tristate "Authenc support" |
163 | select CRYPTO_AEAD | 177 | select CRYPTO_AEAD |
@@ -559,6 +573,22 @@ config CRYPTO_SHA1_PPC | |||
559 | This is the powerpc hardware accelerated implementation of the | 573 | This is the powerpc hardware accelerated implementation of the |
560 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | 574 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). |
561 | 575 | ||
576 | config CRYPTO_SHA1_MB | ||
577 | tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)" | ||
578 | depends on X86 && 64BIT | ||
579 | select CRYPTO_SHA1 | ||
580 | select CRYPTO_HASH | ||
581 | select CRYPTO_MCRYPTD | ||
582 | help | ||
583 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented | ||
584 | using multi-buffer technique. This algorithm computes on | ||
585 | multiple data lanes concurrently with SIMD instructions for | ||
586 | better throughput. It should not be enabled by default but | ||
587 | used when there is significant amount of work to keep the keep | ||
588 | the data lanes filled to get performance benefit. If the data | ||
589 | lanes remain unfilled, a flush operation will be initiated to | ||
590 | process the crypto jobs, adding a slight latency. | ||
591 | |||
562 | config CRYPTO_SHA256 | 592 | config CRYPTO_SHA256 |
563 | tristate "SHA224 and SHA256 digest algorithm" | 593 | tristate "SHA224 and SHA256 digest algorithm" |
564 | select CRYPTO_HASH | 594 | select CRYPTO_HASH |
diff --git a/crypto/Makefile b/crypto/Makefile
index cfa57b3f5a4d..1445b9100c05 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o | |||
60 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o | 60 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o |
61 | obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o | 61 | obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o |
62 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o | 62 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o |
63 | obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o | ||
63 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o | 64 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o |
64 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o | 65 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o |
65 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o | 66 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o |
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f2a5d8f656ff..f6a36a52d738 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -131,8 +131,10 @@ int crypto_hash_walk_first(struct ahash_request *req, | |||
131 | { | 131 | { |
132 | walk->total = req->nbytes; | 132 | walk->total = req->nbytes; |
133 | 133 | ||
134 | if (!walk->total) | 134 | if (!walk->total) { |
135 | walk->entrylen = 0; | ||
135 | return 0; | 136 | return 0; |
137 | } | ||
136 | 138 | ||
137 | walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); | 139 | walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); |
138 | walk->sg = req->src; | 140 | walk->sg = req->src; |
@@ -147,8 +149,10 @@ int crypto_ahash_walk_first(struct ahash_request *req, | |||
147 | { | 149 | { |
148 | walk->total = req->nbytes; | 150 | walk->total = req->nbytes; |
149 | 151 | ||
150 | if (!walk->total) | 152 | if (!walk->total) { |
153 | walk->entrylen = 0; | ||
151 | return 0; | 154 | return 0; |
155 | } | ||
152 | 156 | ||
153 | walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); | 157 | walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); |
154 | walk->sg = req->src; | 158 | walk->sg = req->src; |
@@ -167,8 +171,10 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, | |||
167 | { | 171 | { |
168 | walk->total = len; | 172 | walk->total = len; |
169 | 173 | ||
170 | if (!walk->total) | 174 | if (!walk->total) { |
175 | walk->entrylen = 0; | ||
171 | return 0; | 176 | return 0; |
177 | } | ||
172 | 178 | ||
173 | walk->alignmask = crypto_hash_alignmask(hdesc->tfm); | 179 | walk->alignmask = crypto_hash_alignmask(hdesc->tfm); |
174 | walk->sg = sg; | 180 | walk->sg = sg; |
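
All three hunks make the hash-walk helpers safe for zero-length requests. A hedged sketch of the calling pattern that depends on it is below: the sketch_walk_start() function is made up, but crypto_hash_walk_first() and crypto_hash_walk_last() are the real helpers from crypto/internal/hash.h; the latter returns true only once both entrylen and total are zero, so a zero-byte request needs entrylen cleared in the early-return path above or the test reads stale data.

```c
#include <crypto/internal/hash.h>

static int sketch_walk_start(struct ahash_request *req,
			     struct crypto_hash_walk *walk)
{
	int nbytes = crypto_hash_walk_first(req, walk);

	if (crypto_hash_walk_last(walk)) {
		/* nothing (left) to hash: a multi-buffer driver would mark
		 * the job as final and submit or flush it here */
	}

	return nbytes;
}
```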
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index a19c027b29bd..83187f497c7c 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -49,7 +49,7 @@ struct skcipher_ctx { | |||
49 | struct ablkcipher_request req; | 49 | struct ablkcipher_request req; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | #define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \ | 52 | #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ |
53 | sizeof(struct scatterlist) - 1) | 53 | sizeof(struct scatterlist) - 1) |
54 | 54 | ||
55 | static inline int skcipher_sndbuf(struct sock *sk) | 55 | static inline int skcipher_sndbuf(struct sock *sk) |
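
The reason for hard-coding 4096: each per-message scatterlist block is allocated with sock_kmalloc(), which is capped by net.core.optmem_max (20480 bytes by default). Assuming a 32-byte struct scatterlist (an assumption; the exact size depends on the config), the old PAGE_SIZE-based formula allows about 126 entries per list on 4 KiB-page systems but roughly 2046 on 64 KiB-page kernels, i.e. a ~64 KiB allocation that sock_kmalloc() always rejects. That is the 64K-page algif failure mentioned in the pull summary; dividing by a fixed 4096 keeps the list the same modest size regardless of page size.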
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a53ee099e281..54cfd4820abc 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -117,27 +117,18 @@ static const struct drbg_core drbg_cores[] = { | |||
117 | { | 117 | { |
118 | .flags = DRBG_CTR | DRBG_STRENGTH128, | 118 | .flags = DRBG_CTR | DRBG_STRENGTH128, |
119 | .statelen = 32, /* 256 bits as defined in 10.2.1 */ | 119 | .statelen = 32, /* 256 bits as defined in 10.2.1 */ |
120 | .max_addtllen = 35, | ||
121 | .max_bits = 19, | ||
122 | .max_req = 48, | ||
123 | .blocklen_bytes = 16, | 120 | .blocklen_bytes = 16, |
124 | .cra_name = "ctr_aes128", | 121 | .cra_name = "ctr_aes128", |
125 | .backend_cra_name = "ecb(aes)", | 122 | .backend_cra_name = "ecb(aes)", |
126 | }, { | 123 | }, { |
127 | .flags = DRBG_CTR | DRBG_STRENGTH192, | 124 | .flags = DRBG_CTR | DRBG_STRENGTH192, |
128 | .statelen = 40, /* 320 bits as defined in 10.2.1 */ | 125 | .statelen = 40, /* 320 bits as defined in 10.2.1 */ |
129 | .max_addtllen = 35, | ||
130 | .max_bits = 19, | ||
131 | .max_req = 48, | ||
132 | .blocklen_bytes = 16, | 126 | .blocklen_bytes = 16, |
133 | .cra_name = "ctr_aes192", | 127 | .cra_name = "ctr_aes192", |
134 | .backend_cra_name = "ecb(aes)", | 128 | .backend_cra_name = "ecb(aes)", |
135 | }, { | 129 | }, { |
136 | .flags = DRBG_CTR | DRBG_STRENGTH256, | 130 | .flags = DRBG_CTR | DRBG_STRENGTH256, |
137 | .statelen = 48, /* 384 bits as defined in 10.2.1 */ | 131 | .statelen = 48, /* 384 bits as defined in 10.2.1 */ |
138 | .max_addtllen = 35, | ||
139 | .max_bits = 19, | ||
140 | .max_req = 48, | ||
141 | .blocklen_bytes = 16, | 132 | .blocklen_bytes = 16, |
142 | .cra_name = "ctr_aes256", | 133 | .cra_name = "ctr_aes256", |
143 | .backend_cra_name = "ecb(aes)", | 134 | .backend_cra_name = "ecb(aes)", |
@@ -147,36 +138,24 @@ static const struct drbg_core drbg_cores[] = { | |||
147 | { | 138 | { |
148 | .flags = DRBG_HASH | DRBG_STRENGTH128, | 139 | .flags = DRBG_HASH | DRBG_STRENGTH128, |
149 | .statelen = 55, /* 440 bits */ | 140 | .statelen = 55, /* 440 bits */ |
150 | .max_addtllen = 35, | ||
151 | .max_bits = 19, | ||
152 | .max_req = 48, | ||
153 | .blocklen_bytes = 20, | 141 | .blocklen_bytes = 20, |
154 | .cra_name = "sha1", | 142 | .cra_name = "sha1", |
155 | .backend_cra_name = "sha1", | 143 | .backend_cra_name = "sha1", |
156 | }, { | 144 | }, { |
157 | .flags = DRBG_HASH | DRBG_STRENGTH256, | 145 | .flags = DRBG_HASH | DRBG_STRENGTH256, |
158 | .statelen = 111, /* 888 bits */ | 146 | .statelen = 111, /* 888 bits */ |
159 | .max_addtllen = 35, | ||
160 | .max_bits = 19, | ||
161 | .max_req = 48, | ||
162 | .blocklen_bytes = 48, | 147 | .blocklen_bytes = 48, |
163 | .cra_name = "sha384", | 148 | .cra_name = "sha384", |
164 | .backend_cra_name = "sha384", | 149 | .backend_cra_name = "sha384", |
165 | }, { | 150 | }, { |
166 | .flags = DRBG_HASH | DRBG_STRENGTH256, | 151 | .flags = DRBG_HASH | DRBG_STRENGTH256, |
167 | .statelen = 111, /* 888 bits */ | 152 | .statelen = 111, /* 888 bits */ |
168 | .max_addtllen = 35, | ||
169 | .max_bits = 19, | ||
170 | .max_req = 48, | ||
171 | .blocklen_bytes = 64, | 153 | .blocklen_bytes = 64, |
172 | .cra_name = "sha512", | 154 | .cra_name = "sha512", |
173 | .backend_cra_name = "sha512", | 155 | .backend_cra_name = "sha512", |
174 | }, { | 156 | }, { |
175 | .flags = DRBG_HASH | DRBG_STRENGTH256, | 157 | .flags = DRBG_HASH | DRBG_STRENGTH256, |
176 | .statelen = 55, /* 440 bits */ | 158 | .statelen = 55, /* 440 bits */ |
177 | .max_addtllen = 35, | ||
178 | .max_bits = 19, | ||
179 | .max_req = 48, | ||
180 | .blocklen_bytes = 32, | 159 | .blocklen_bytes = 32, |
181 | .cra_name = "sha256", | 160 | .cra_name = "sha256", |
182 | .backend_cra_name = "sha256", | 161 | .backend_cra_name = "sha256", |
@@ -186,36 +165,24 @@ static const struct drbg_core drbg_cores[] = { | |||
186 | { | 165 | { |
187 | .flags = DRBG_HMAC | DRBG_STRENGTH128, | 166 | .flags = DRBG_HMAC | DRBG_STRENGTH128, |
188 | .statelen = 20, /* block length of cipher */ | 167 | .statelen = 20, /* block length of cipher */ |
189 | .max_addtllen = 35, | ||
190 | .max_bits = 19, | ||
191 | .max_req = 48, | ||
192 | .blocklen_bytes = 20, | 168 | .blocklen_bytes = 20, |
193 | .cra_name = "hmac_sha1", | 169 | .cra_name = "hmac_sha1", |
194 | .backend_cra_name = "hmac(sha1)", | 170 | .backend_cra_name = "hmac(sha1)", |
195 | }, { | 171 | }, { |
196 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | 172 | .flags = DRBG_HMAC | DRBG_STRENGTH256, |
197 | .statelen = 48, /* block length of cipher */ | 173 | .statelen = 48, /* block length of cipher */ |
198 | .max_addtllen = 35, | ||
199 | .max_bits = 19, | ||
200 | .max_req = 48, | ||
201 | .blocklen_bytes = 48, | 174 | .blocklen_bytes = 48, |
202 | .cra_name = "hmac_sha384", | 175 | .cra_name = "hmac_sha384", |
203 | .backend_cra_name = "hmac(sha384)", | 176 | .backend_cra_name = "hmac(sha384)", |
204 | }, { | 177 | }, { |
205 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | 178 | .flags = DRBG_HMAC | DRBG_STRENGTH256, |
206 | .statelen = 64, /* block length of cipher */ | 179 | .statelen = 64, /* block length of cipher */ |
207 | .max_addtllen = 35, | ||
208 | .max_bits = 19, | ||
209 | .max_req = 48, | ||
210 | .blocklen_bytes = 64, | 180 | .blocklen_bytes = 64, |
211 | .cra_name = "hmac_sha512", | 181 | .cra_name = "hmac_sha512", |
212 | .backend_cra_name = "hmac(sha512)", | 182 | .backend_cra_name = "hmac(sha512)", |
213 | }, { | 183 | }, { |
214 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | 184 | .flags = DRBG_HMAC | DRBG_STRENGTH256, |
215 | .statelen = 32, /* block length of cipher */ | 185 | .statelen = 32, /* block length of cipher */ |
216 | .max_addtllen = 35, | ||
217 | .max_bits = 19, | ||
218 | .max_req = 48, | ||
219 | .blocklen_bytes = 32, | 186 | .blocklen_bytes = 32, |
220 | .cra_name = "hmac_sha256", | 187 | .cra_name = "hmac_sha256", |
221 | .backend_cra_name = "hmac(sha256)", | 188 | .backend_cra_name = "hmac(sha256)", |
@@ -302,20 +269,19 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg, | |||
302 | * Convert an integer into a byte representation of this integer. | 269 | * Convert an integer into a byte representation of this integer. |
303 | * The byte representation is big-endian | 270 | * The byte representation is big-endian |
304 | * | 271 | * |
305 | * @buf buffer holding the converted integer | ||
306 | * @val value to be converted | 272 | * @val value to be converted |
307 | * @buflen length of buffer | 273 | * @buf buffer holding the converted integer -- caller must ensure that |
274 | * buffer size is at least 32 bit | ||
308 | */ | 275 | */ |
309 | #if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR)) | 276 | #if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR)) |
310 | static inline void drbg_int2byte(unsigned char *buf, uint64_t val, | 277 | static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf) |
311 | size_t buflen) | ||
312 | { | 278 | { |
313 | unsigned char *byte; | 279 | struct s { |
314 | uint64_t i; | 280 | __be32 conv; |
281 | }; | ||
282 | struct s *conversion = (struct s *) buf; | ||
315 | 283 | ||
316 | byte = buf + (buflen - 1); | 284 | conversion->conv = cpu_to_be32(val); |
317 | for (i = 0; i < buflen; i++) | ||
318 | *(byte--) = val >> (i * 8) & 0xff; | ||
319 | } | 285 | } |
320 | 286 | ||
321 | /* | 287 | /* |
@@ -483,10 +449,10 @@ static int drbg_ctr_df(struct drbg_state *drbg, | |||
483 | /* 10.4.2 step 2 -- calculate the entire length of all input data */ | 449 | /* 10.4.2 step 2 -- calculate the entire length of all input data */ |
484 | list_for_each_entry(seed, seedlist, list) | 450 | list_for_each_entry(seed, seedlist, list) |
485 | inputlen += seed->len; | 451 | inputlen += seed->len; |
486 | drbg_int2byte(&L_N[0], inputlen, 4); | 452 | drbg_cpu_to_be32(inputlen, &L_N[0]); |
487 | 453 | ||
488 | /* 10.4.2 step 3 */ | 454 | /* 10.4.2 step 3 */ |
489 | drbg_int2byte(&L_N[4], bytes_to_return, 4); | 455 | drbg_cpu_to_be32(bytes_to_return, &L_N[4]); |
490 | 456 | ||
491 | /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ | 457 | /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ |
492 | padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg)); | 458 | padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg)); |
@@ -517,7 +483,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, | |||
517 | * holds zeros after allocation -- even the increment of i | 483 | * holds zeros after allocation -- even the increment of i |
518 | * is irrelevant as the increment remains within length of i | 484 | * is irrelevant as the increment remains within length of i |
519 | */ | 485 | */ |
520 | drbg_int2byte(iv, i, 4); | 486 | drbg_cpu_to_be32(i, iv); |
521 | /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ | 487 | /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ |
522 | ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list); | 488 | ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list); |
523 | if (ret) | 489 | if (ret) |
@@ -729,11 +695,9 @@ static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed, | |||
729 | LIST_HEAD(seedlist); | 695 | LIST_HEAD(seedlist); |
730 | LIST_HEAD(vdatalist); | 696 | LIST_HEAD(vdatalist); |
731 | 697 | ||
732 | if (!reseed) { | 698 | if (!reseed) |
733 | /* 10.1.2.3 step 2 */ | 699 | /* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */ |
734 | memset(drbg->C, 0, drbg_statelen(drbg)); | ||
735 | memset(drbg->V, 1, drbg_statelen(drbg)); | 700 | memset(drbg->V, 1, drbg_statelen(drbg)); |
736 | } | ||
737 | 701 | ||
738 | drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg)); | 702 | drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg)); |
739 | list_add_tail(&seed1.list, &seedlist); | 703 | list_add_tail(&seed1.list, &seedlist); |
@@ -862,7 +826,7 @@ static int drbg_hash_df(struct drbg_state *drbg, | |||
862 | 826 | ||
863 | /* 10.4.1 step 3 */ | 827 | /* 10.4.1 step 3 */ |
864 | input[0] = 1; | 828 | input[0] = 1; |
865 | drbg_int2byte(&input[1], (outlen * 8), 4); | 829 | drbg_cpu_to_be32((outlen * 8), &input[1]); |
866 | 830 | ||
867 | /* 10.4.1 step 4.1 -- concatenation of data for input into hash */ | 831 | /* 10.4.1 step 4.1 -- concatenation of data for input into hash */ |
868 | drbg_string_fill(&data, input, 5); | 832 | drbg_string_fill(&data, input, 5); |
@@ -1023,7 +987,10 @@ static int drbg_hash_generate(struct drbg_state *drbg, | |||
1023 | { | 987 | { |
1024 | int len = 0; | 988 | int len = 0; |
1025 | int ret = 0; | 989 | int ret = 0; |
1026 | unsigned char req[8]; | 990 | union { |
991 | unsigned char req[8]; | ||
992 | __be64 req_int; | ||
993 | } u; | ||
1027 | unsigned char prefix = DRBG_PREFIX3; | 994 | unsigned char prefix = DRBG_PREFIX3; |
1028 | struct drbg_string data1, data2; | 995 | struct drbg_string data1, data2; |
1029 | LIST_HEAD(datalist); | 996 | LIST_HEAD(datalist); |
@@ -1053,8 +1020,8 @@ static int drbg_hash_generate(struct drbg_state *drbg, | |||
1053 | drbg->scratchpad, drbg_blocklen(drbg)); | 1020 | drbg->scratchpad, drbg_blocklen(drbg)); |
1054 | drbg_add_buf(drbg->V, drbg_statelen(drbg), | 1021 | drbg_add_buf(drbg->V, drbg_statelen(drbg), |
1055 | drbg->C, drbg_statelen(drbg)); | 1022 | drbg->C, drbg_statelen(drbg)); |
1056 | drbg_int2byte(req, drbg->reseed_ctr, sizeof(req)); | 1023 | u.req_int = cpu_to_be64(drbg->reseed_ctr); |
1057 | drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8); | 1024 | drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); |
1058 | 1025 | ||
1059 | out: | 1026 | out: |
1060 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | 1027 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); |
@@ -1142,6 +1109,11 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1142 | pr_devel("DRBG: using personalization string\n"); | 1109 | pr_devel("DRBG: using personalization string\n"); |
1143 | } | 1110 | } |
1144 | 1111 | ||
1112 | if (!reseed) { | ||
1113 | memset(drbg->V, 0, drbg_statelen(drbg)); | ||
1114 | memset(drbg->C, 0, drbg_statelen(drbg)); | ||
1115 | } | ||
1116 | |||
1145 | ret = drbg->d_ops->update(drbg, &seedlist, reseed); | 1117 | ret = drbg->d_ops->update(drbg, &seedlist, reseed); |
1146 | if (ret) | 1118 | if (ret) |
1147 | goto out; | 1119 | goto out; |
@@ -1151,8 +1123,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1151 | drbg->reseed_ctr = 1; | 1123 | drbg->reseed_ctr = 1; |
1152 | 1124 | ||
1153 | out: | 1125 | out: |
1154 | if (entropy) | 1126 | kzfree(entropy); |
1155 | kzfree(entropy); | ||
1156 | return ret; | 1127 | return ret; |
1157 | } | 1128 | } |
1158 | 1129 | ||
@@ -1161,19 +1132,15 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) | |||
1161 | { | 1132 | { |
1162 | if (!drbg) | 1133 | if (!drbg) |
1163 | return; | 1134 | return; |
1164 | if (drbg->V) | 1135 | kzfree(drbg->V); |
1165 | kzfree(drbg->V); | ||
1166 | drbg->V = NULL; | 1136 | drbg->V = NULL; |
1167 | if (drbg->C) | 1137 | kzfree(drbg->C); |
1168 | kzfree(drbg->C); | ||
1169 | drbg->C = NULL; | 1138 | drbg->C = NULL; |
1170 | if (drbg->scratchpad) | 1139 | kzfree(drbg->scratchpad); |
1171 | kzfree(drbg->scratchpad); | ||
1172 | drbg->scratchpad = NULL; | 1140 | drbg->scratchpad = NULL; |
1173 | drbg->reseed_ctr = 0; | 1141 | drbg->reseed_ctr = 0; |
1174 | #ifdef CONFIG_CRYPTO_FIPS | 1142 | #ifdef CONFIG_CRYPTO_FIPS |
1175 | if (drbg->prev) | 1143 | kzfree(drbg->prev); |
1176 | kzfree(drbg->prev); | ||
1177 | drbg->prev = NULL; | 1144 | drbg->prev = NULL; |
1178 | drbg->fips_primed = false; | 1145 | drbg->fips_primed = false; |
1179 | #endif | 1146 | #endif |
@@ -1188,17 +1155,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) | |||
1188 | int ret = -ENOMEM; | 1155 | int ret = -ENOMEM; |
1189 | unsigned int sb_size = 0; | 1156 | unsigned int sb_size = 0; |
1190 | 1157 | ||
1191 | if (!drbg) | 1158 | drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); |
1192 | return -EINVAL; | ||
1193 | |||
1194 | drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL); | ||
1195 | if (!drbg->V) | 1159 | if (!drbg->V) |
1196 | goto err; | 1160 | goto err; |
1197 | drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL); | 1161 | drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL); |
1198 | if (!drbg->C) | 1162 | if (!drbg->C) |
1199 | goto err; | 1163 | goto err; |
1200 | #ifdef CONFIG_CRYPTO_FIPS | 1164 | #ifdef CONFIG_CRYPTO_FIPS |
1201 | drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL); | 1165 | drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL); |
1202 | if (!drbg->prev) | 1166 | if (!drbg->prev) |
1203 | goto err; | 1167 | goto err; |
1204 | drbg->fips_primed = false; | 1168 | drbg->fips_primed = false; |
@@ -1263,15 +1227,6 @@ static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) | |||
1263 | int ret = -ENOMEM; | 1227 | int ret = -ENOMEM; |
1264 | struct drbg_state *tmp = NULL; | 1228 | struct drbg_state *tmp = NULL; |
1265 | 1229 | ||
1266 | if (!drbg || !drbg->core || !drbg->V || !drbg->C) { | ||
1267 | pr_devel("DRBG: attempt to generate shadow copy for " | ||
1268 | "uninitialized DRBG state rejected\n"); | ||
1269 | return -EINVAL; | ||
1270 | } | ||
1271 | /* HMAC does not have a scratchpad */ | ||
1272 | if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad) | ||
1273 | return -EINVAL; | ||
1274 | |||
1275 | tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); | 1230 | tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); |
1276 | if (!tmp) | 1231 | if (!tmp) |
1277 | return -ENOMEM; | 1232 | return -ENOMEM; |
@@ -1293,8 +1248,7 @@ static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) | |||
1293 | return 0; | 1248 | return 0; |
1294 | 1249 | ||
1295 | err: | 1250 | err: |
1296 | if (tmp) | 1251 | kzfree(tmp); |
1297 | kzfree(tmp); | ||
1298 | return ret; | 1252 | return ret; |
1299 | } | 1253 | } |
1300 | 1254 | ||
@@ -1385,11 +1339,9 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1385 | shadow->seeded = false; | 1339 | shadow->seeded = false; |
1386 | 1340 | ||
1387 | /* allocate cipher handle */ | 1341 | /* allocate cipher handle */ |
1388 | if (shadow->d_ops->crypto_init) { | 1342 | len = shadow->d_ops->crypto_init(shadow); |
1389 | len = shadow->d_ops->crypto_init(shadow); | 1343 | if (len) |
1390 | if (len) | 1344 | goto err; |
1391 | goto err; | ||
1392 | } | ||
1393 | 1345 | ||
1394 | if (shadow->pr || !shadow->seeded) { | 1346 | if (shadow->pr || !shadow->seeded) { |
1395 | pr_devel("DRBG: reseeding before generation (prediction " | 1347 | pr_devel("DRBG: reseeding before generation (prediction " |
@@ -1471,8 +1423,7 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1471 | #endif | 1423 | #endif |
1472 | 1424 | ||
1473 | err: | 1425 | err: |
1474 | if (shadow->d_ops->crypto_fini) | 1426 | shadow->d_ops->crypto_fini(shadow); |
1475 | shadow->d_ops->crypto_fini(shadow); | ||
1476 | drbg_restore_shadow(drbg, &shadow); | 1427 | drbg_restore_shadow(drbg, &shadow); |
1477 | return len; | 1428 | return len; |
1478 | } | 1429 | } |
@@ -1566,11 +1517,10 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, | |||
1566 | return ret; | 1517 | return ret; |
1567 | 1518 | ||
1568 | ret = -EFAULT; | 1519 | ret = -EFAULT; |
1569 | if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg)) | 1520 | if (drbg->d_ops->crypto_init(drbg)) |
1570 | goto err; | 1521 | goto err; |
1571 | ret = drbg_seed(drbg, pers, false); | 1522 | ret = drbg_seed(drbg, pers, false); |
1572 | if (drbg->d_ops->crypto_fini) | 1523 | drbg->d_ops->crypto_fini(drbg); |
1573 | drbg->d_ops->crypto_fini(drbg); | ||
1574 | if (ret) | 1524 | if (ret) |
1575 | goto err; | 1525 | goto err; |
1576 | 1526 | ||
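
Much of the churn above replaces the open-coded drbg_int2byte() serializer with cpu_to_be32()/cpu_to_be64() stores that sparse can type-check. A small illustrative sketch of the equivalence follows; the sketch_* names are made up, and put_unaligned_be32() is used here for brevity where the patch itself stores through a __be32-typed struct, but the resulting bytes are identical.

```c
#include <asm/unaligned.h>
#include <linux/types.h>

/* What drbg_int2byte(buf, val, 4) used to do by hand... */
static void sketch_be32_by_hand(unsigned char *buf, u32 val)
{
	int i;

	for (i = 0; i < 4; i++)
		buf[3 - i] = (val >> (i * 8)) & 0xff;
}

/* ...and the equivalent, endian-annotated store the DRBG now relies on. */
static void sketch_be32_by_helper(unsigned char *buf, u32 val)
{
	put_unaligned_be32(val, buf);
}
```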
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 4586dd15b0d8..34d072b72a73 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -68,7 +68,7 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
68 | size_t tmp_len = *dlen; | 68 | size_t tmp_len = *dlen; |
69 | size_t __slen = slen; | 69 | size_t __slen = slen; |
70 | 70 | ||
71 | err = lz4_decompress(src, &__slen, dst, tmp_len); | 71 | err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); |
72 | if (err < 0) | 72 | if (err < 0) |
73 | return -EINVAL; | 73 | return -EINVAL; |
74 | 74 | ||
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 151ba31d34e3..9218b3fed5e3 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -68,7 +68,7 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
68 | size_t tmp_len = *dlen; | 68 | size_t tmp_len = *dlen; |
69 | size_t __slen = slen; | 69 | size_t __slen = slen; |
70 | 70 | ||
71 | err = lz4_decompress(src, &__slen, dst, tmp_len); | 71 | err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); |
72 | if (err < 0) | 72 | if (err < 0) |
73 | return -EINVAL; | 73 | return -EINVAL; |
74 | 74 | ||
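
Seen through the generic compression API, the lz4/lz4hc fix means *dlen now comes back as the number of bytes actually produced rather than being left at the destination capacity. A hedged sketch of a caller (the function name is hypothetical, error handling trimmed); the new testmgr vectors added later in this series exercise exactly this path.

```c
#include <linux/crypto.h>
#include <linux/err.h>

static int sketch_lz4_decompress(const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* on entry *dlen is the capacity of dst; on success it is updated
	 * to the decompressed length */
	err = crypto_comp_decompress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}
```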
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
new file mode 100644
index 000000000000..b39fbd530102
--- /dev/null
+++ b/crypto/mcryptd.c
@@ -0,0 +1,705 @@ | |||
1 | /* | ||
2 | * Software multibuffer async crypto daemon. | ||
3 | * | ||
4 | * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com> | ||
5 | * | ||
6 | * Adapted from crypto daemon. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <crypto/algapi.h> | ||
16 | #include <crypto/internal/hash.h> | ||
17 | #include <crypto/internal/aead.h> | ||
18 | #include <crypto/mcryptd.h> | ||
19 | #include <crypto/crypto_wq.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/scatterlist.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/hardirq.h> | ||
29 | |||
30 | #define MCRYPTD_MAX_CPU_QLEN 100 | ||
31 | #define MCRYPTD_BATCH 9 | ||
32 | |||
33 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | ||
34 | unsigned int tail); | ||
35 | |||
36 | struct mcryptd_flush_list { | ||
37 | struct list_head list; | ||
38 | struct mutex lock; | ||
39 | }; | ||
40 | |||
41 | static struct mcryptd_flush_list __percpu *mcryptd_flist; | ||
42 | |||
43 | struct hashd_instance_ctx { | ||
44 | struct crypto_shash_spawn spawn; | ||
45 | struct mcryptd_queue *queue; | ||
46 | }; | ||
47 | |||
48 | static void mcryptd_queue_worker(struct work_struct *work); | ||
49 | |||
50 | void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay) | ||
51 | { | ||
52 | struct mcryptd_flush_list *flist; | ||
53 | |||
54 | if (!cstate->flusher_engaged) { | ||
55 | /* put the flusher on the flush list */ | ||
56 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); | ||
57 | mutex_lock(&flist->lock); | ||
58 | list_add_tail(&cstate->flush_list, &flist->list); | ||
59 | cstate->flusher_engaged = true; | ||
60 | cstate->next_flush = jiffies + delay; | ||
61 | queue_delayed_work_on(smp_processor_id(), kcrypto_wq, | ||
62 | &cstate->flush, delay); | ||
63 | mutex_unlock(&flist->lock); | ||
64 | } | ||
65 | } | ||
66 | EXPORT_SYMBOL(mcryptd_arm_flusher); | ||
67 | |||
68 | static int mcryptd_init_queue(struct mcryptd_queue *queue, | ||
69 | unsigned int max_cpu_qlen) | ||
70 | { | ||
71 | int cpu; | ||
72 | struct mcryptd_cpu_queue *cpu_queue; | ||
73 | |||
74 | queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); | ||
75 | pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); | ||
76 | if (!queue->cpu_queue) | ||
77 | return -ENOMEM; | ||
78 | for_each_possible_cpu(cpu) { | ||
79 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | ||
80 | pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); | ||
81 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | ||
82 | INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); | ||
83 | } | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static void mcryptd_fini_queue(struct mcryptd_queue *queue) | ||
88 | { | ||
89 | int cpu; | ||
90 | struct mcryptd_cpu_queue *cpu_queue; | ||
91 | |||
92 | for_each_possible_cpu(cpu) { | ||
93 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | ||
94 | BUG_ON(cpu_queue->queue.qlen); | ||
95 | } | ||
96 | free_percpu(queue->cpu_queue); | ||
97 | } | ||
98 | |||
99 | static int mcryptd_enqueue_request(struct mcryptd_queue *queue, | ||
100 | struct crypto_async_request *request, | ||
101 | struct mcryptd_hash_request_ctx *rctx) | ||
102 | { | ||
103 | int cpu, err; | ||
104 | struct mcryptd_cpu_queue *cpu_queue; | ||
105 | |||
106 | cpu = get_cpu(); | ||
107 | cpu_queue = this_cpu_ptr(queue->cpu_queue); | ||
108 | rctx->tag.cpu = cpu; | ||
109 | |||
110 | err = crypto_enqueue_request(&cpu_queue->queue, request); | ||
111 | pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", | ||
112 | cpu, cpu_queue, request); | ||
113 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | ||
114 | put_cpu(); | ||
115 | |||
116 | return err; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Try to opportunisticlly flush the partially completed jobs if | ||
121 | * crypto daemon is the only task running. | ||
122 | */ | ||
123 | static void mcryptd_opportunistic_flush(void) | ||
124 | { | ||
125 | struct mcryptd_flush_list *flist; | ||
126 | struct mcryptd_alg_cstate *cstate; | ||
127 | |||
128 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); | ||
129 | while (single_task_running()) { | ||
130 | mutex_lock(&flist->lock); | ||
131 | if (list_empty(&flist->list)) { | ||
132 | mutex_unlock(&flist->lock); | ||
133 | return; | ||
134 | } | ||
135 | cstate = list_entry(flist->list.next, | ||
136 | struct mcryptd_alg_cstate, flush_list); | ||
137 | if (!cstate->flusher_engaged) { | ||
138 | mutex_unlock(&flist->lock); | ||
139 | return; | ||
140 | } | ||
141 | list_del(&cstate->flush_list); | ||
142 | cstate->flusher_engaged = false; | ||
143 | mutex_unlock(&flist->lock); | ||
144 | cstate->alg_state->flusher(cstate); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Called in workqueue context, do one real cryption work (via | ||
150 | * req->complete) and reschedule itself if there are more work to | ||
151 | * do. | ||
152 | */ | ||
153 | static void mcryptd_queue_worker(struct work_struct *work) | ||
154 | { | ||
155 | struct mcryptd_cpu_queue *cpu_queue; | ||
156 | struct crypto_async_request *req, *backlog; | ||
157 | int i; | ||
158 | |||
159 | /* | ||
160 | * Need to loop through more than once for multi-buffer to | ||
161 | * be effective. | ||
162 | */ | ||
163 | |||
164 | cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); | ||
165 | i = 0; | ||
166 | while (i < MCRYPTD_BATCH || single_task_running()) { | ||
167 | /* | ||
168 | * preempt_disable/enable is used to prevent | ||
169 | * being preempted by mcryptd_enqueue_request() | ||
170 | */ | ||
171 | local_bh_disable(); | ||
172 | preempt_disable(); | ||
173 | backlog = crypto_get_backlog(&cpu_queue->queue); | ||
174 | req = crypto_dequeue_request(&cpu_queue->queue); | ||
175 | preempt_enable(); | ||
176 | local_bh_enable(); | ||
177 | |||
178 | if (!req) { | ||
179 | mcryptd_opportunistic_flush(); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | if (backlog) | ||
184 | backlog->complete(backlog, -EINPROGRESS); | ||
185 | req->complete(req, 0); | ||
186 | if (!cpu_queue->queue.qlen) | ||
187 | return; | ||
188 | ++i; | ||
189 | } | ||
190 | if (cpu_queue->queue.qlen) | ||
191 | queue_work(kcrypto_wq, &cpu_queue->work); | ||
192 | } | ||
193 | |||
194 | void mcryptd_flusher(struct work_struct *__work) | ||
195 | { | ||
196 | struct mcryptd_alg_cstate *alg_cpu_state; | ||
197 | struct mcryptd_alg_state *alg_state; | ||
198 | struct mcryptd_flush_list *flist; | ||
199 | int cpu; | ||
200 | |||
201 | cpu = smp_processor_id(); | ||
202 | alg_cpu_state = container_of(to_delayed_work(__work), | ||
203 | struct mcryptd_alg_cstate, flush); | ||
204 | alg_state = alg_cpu_state->alg_state; | ||
205 | if (alg_cpu_state->cpu != cpu) | ||
206 | pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n", | ||
207 | cpu, alg_cpu_state->cpu); | ||
208 | |||
209 | if (alg_cpu_state->flusher_engaged) { | ||
210 | flist = per_cpu_ptr(mcryptd_flist, cpu); | ||
211 | mutex_lock(&flist->lock); | ||
212 | list_del(&alg_cpu_state->flush_list); | ||
213 | alg_cpu_state->flusher_engaged = false; | ||
214 | mutex_unlock(&flist->lock); | ||
215 | alg_state->flusher(alg_cpu_state); | ||
216 | } | ||
217 | } | ||
218 | EXPORT_SYMBOL_GPL(mcryptd_flusher); | ||
219 | |||
220 | static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm) | ||
221 | { | ||
222 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
223 | struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
224 | |||
225 | return ictx->queue; | ||
226 | } | ||
227 | |||
228 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | ||
229 | unsigned int tail) | ||
230 | { | ||
231 | char *p; | ||
232 | struct crypto_instance *inst; | ||
233 | int err; | ||
234 | |||
235 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); | ||
236 | if (!p) | ||
237 | return ERR_PTR(-ENOMEM); | ||
238 | |||
239 | inst = (void *)(p + head); | ||
240 | |||
241 | err = -ENAMETOOLONG; | ||
242 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
243 | "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
244 | goto out_free_inst; | ||
245 | |||
246 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
247 | |||
248 | inst->alg.cra_priority = alg->cra_priority + 50; | ||
249 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
250 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
251 | |||
252 | out: | ||
253 | return p; | ||
254 | |||
255 | out_free_inst: | ||
256 | kfree(p); | ||
257 | p = ERR_PTR(err); | ||
258 | goto out; | ||
259 | } | ||
260 | |||
261 | static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm) | ||
262 | { | ||
263 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
264 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
265 | struct crypto_shash_spawn *spawn = &ictx->spawn; | ||
266 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
267 | struct crypto_shash *hash; | ||
268 | |||
269 | hash = crypto_spawn_shash(spawn); | ||
270 | if (IS_ERR(hash)) | ||
271 | return PTR_ERR(hash); | ||
272 | |||
273 | ctx->child = hash; | ||
274 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
275 | sizeof(struct mcryptd_hash_request_ctx) + | ||
276 | crypto_shash_descsize(hash)); | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm) | ||
281 | { | ||
282 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
283 | |||
284 | crypto_free_shash(ctx->child); | ||
285 | } | ||
286 | |||
287 | static int mcryptd_hash_setkey(struct crypto_ahash *parent, | ||
288 | const u8 *key, unsigned int keylen) | ||
289 | { | ||
290 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | ||
291 | struct crypto_shash *child = ctx->child; | ||
292 | int err; | ||
293 | |||
294 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
295 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & | ||
296 | CRYPTO_TFM_REQ_MASK); | ||
297 | err = crypto_shash_setkey(child, key, keylen); | ||
298 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & | ||
299 | CRYPTO_TFM_RES_MASK); | ||
300 | return err; | ||
301 | } | ||
302 | |||
303 | static int mcryptd_hash_enqueue(struct ahash_request *req, | ||
304 | crypto_completion_t complete) | ||
305 | { | ||
306 | int ret; | ||
307 | |||
308 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
309 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
310 | struct mcryptd_queue *queue = | ||
311 | mcryptd_get_queue(crypto_ahash_tfm(tfm)); | ||
312 | |||
313 | rctx->complete = req->base.complete; | ||
314 | req->base.complete = complete; | ||
315 | |||
316 | ret = mcryptd_enqueue_request(queue, &req->base, rctx); | ||
317 | |||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) | ||
322 | { | ||
323 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | ||
324 | struct crypto_shash *child = ctx->child; | ||
325 | struct ahash_request *req = ahash_request_cast(req_async); | ||
326 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
327 | struct shash_desc *desc = &rctx->desc; | ||
328 | |||
329 | if (unlikely(err == -EINPROGRESS)) | ||
330 | goto out; | ||
331 | |||
332 | desc->tfm = child; | ||
333 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
334 | |||
335 | err = crypto_shash_init(desc); | ||
336 | |||
337 | req->base.complete = rctx->complete; | ||
338 | |||
339 | out: | ||
340 | local_bh_disable(); | ||
341 | rctx->complete(&req->base, err); | ||
342 | local_bh_enable(); | ||
343 | } | ||
344 | |||
345 | static int mcryptd_hash_init_enqueue(struct ahash_request *req) | ||
346 | { | ||
347 | return mcryptd_hash_enqueue(req, mcryptd_hash_init); | ||
348 | } | ||
349 | |||
350 | static void mcryptd_hash_update(struct crypto_async_request *req_async, int err) | ||
351 | { | ||
352 | struct ahash_request *req = ahash_request_cast(req_async); | ||
353 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
354 | |||
355 | if (unlikely(err == -EINPROGRESS)) | ||
356 | goto out; | ||
357 | |||
358 | err = shash_ahash_mcryptd_update(req, &rctx->desc); | ||
359 | if (err) { | ||
360 | req->base.complete = rctx->complete; | ||
361 | goto out; | ||
362 | } | ||
363 | |||
364 | return; | ||
365 | out: | ||
366 | local_bh_disable(); | ||
367 | rctx->complete(&req->base, err); | ||
368 | local_bh_enable(); | ||
369 | } | ||
370 | |||
371 | static int mcryptd_hash_update_enqueue(struct ahash_request *req) | ||
372 | { | ||
373 | return mcryptd_hash_enqueue(req, mcryptd_hash_update); | ||
374 | } | ||
375 | |||
376 | static void mcryptd_hash_final(struct crypto_async_request *req_async, int err) | ||
377 | { | ||
378 | struct ahash_request *req = ahash_request_cast(req_async); | ||
379 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
380 | |||
381 | if (unlikely(err == -EINPROGRESS)) | ||
382 | goto out; | ||
383 | |||
384 | err = shash_ahash_mcryptd_final(req, &rctx->desc); | ||
385 | if (err) { | ||
386 | req->base.complete = rctx->complete; | ||
387 | goto out; | ||
388 | } | ||
389 | |||
390 | return; | ||
391 | out: | ||
392 | local_bh_disable(); | ||
393 | rctx->complete(&req->base, err); | ||
394 | local_bh_enable(); | ||
395 | } | ||
396 | |||
397 | static int mcryptd_hash_final_enqueue(struct ahash_request *req) | ||
398 | { | ||
399 | return mcryptd_hash_enqueue(req, mcryptd_hash_final); | ||
400 | } | ||
401 | |||
402 | static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err) | ||
403 | { | ||
404 | struct ahash_request *req = ahash_request_cast(req_async); | ||
405 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
406 | |||
407 | if (unlikely(err == -EINPROGRESS)) | ||
408 | goto out; | ||
409 | |||
410 | err = shash_ahash_mcryptd_finup(req, &rctx->desc); | ||
411 | |||
412 | if (err) { | ||
413 | req->base.complete = rctx->complete; | ||
414 | goto out; | ||
415 | } | ||
416 | |||
417 | return; | ||
418 | out: | ||
419 | local_bh_disable(); | ||
420 | rctx->complete(&req->base, err); | ||
421 | local_bh_enable(); | ||
422 | } | ||
423 | |||
424 | static int mcryptd_hash_finup_enqueue(struct ahash_request *req) | ||
425 | { | ||
426 | return mcryptd_hash_enqueue(req, mcryptd_hash_finup); | ||
427 | } | ||
428 | |||
429 | static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) | ||
430 | { | ||
431 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | ||
432 | struct crypto_shash *child = ctx->child; | ||
433 | struct ahash_request *req = ahash_request_cast(req_async); | ||
434 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
435 | struct shash_desc *desc = &rctx->desc; | ||
436 | |||
437 | if (unlikely(err == -EINPROGRESS)) | ||
438 | goto out; | ||
439 | |||
440 | desc->tfm = child; | ||
441 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ | ||
442 | |||
443 | err = shash_ahash_mcryptd_digest(req, desc); | ||
444 | |||
445 | if (err) { | ||
446 | req->base.complete = rctx->complete; | ||
447 | goto out; | ||
448 | } | ||
449 | |||
450 | return; | ||
451 | out: | ||
452 | local_bh_disable(); | ||
453 | rctx->complete(&req->base, err); | ||
454 | local_bh_enable(); | ||
455 | } | ||
456 | |||
457 | static int mcryptd_hash_digest_enqueue(struct ahash_request *req) | ||
458 | { | ||
459 | return mcryptd_hash_enqueue(req, mcryptd_hash_digest); | ||
460 | } | ||
461 | |||
462 | static int mcryptd_hash_export(struct ahash_request *req, void *out) | ||
463 | { | ||
464 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
465 | |||
466 | return crypto_shash_export(&rctx->desc, out); | ||
467 | } | ||
468 | |||
469 | static int mcryptd_hash_import(struct ahash_request *req, const void *in) | ||
470 | { | ||
471 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
472 | |||
473 | return crypto_shash_import(&rctx->desc, in); | ||
474 | } | ||
475 | |||
476 | static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | ||
477 | struct mcryptd_queue *queue) | ||
478 | { | ||
479 | struct hashd_instance_ctx *ctx; | ||
480 | struct ahash_instance *inst; | ||
481 | struct shash_alg *salg; | ||
482 | struct crypto_alg *alg; | ||
483 | int err; | ||
484 | |||
485 | salg = shash_attr_alg(tb[1], 0, 0); | ||
486 | if (IS_ERR(salg)) | ||
487 | return PTR_ERR(salg); | ||
488 | |||
489 | alg = &salg->base; | ||
490 | pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); | ||
491 | inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), | ||
492 | sizeof(*ctx)); | ||
493 | err = PTR_ERR(inst); | ||
494 | if (IS_ERR(inst)) | ||
495 | goto out_put_alg; | ||
496 | |||
497 | ctx = ahash_instance_ctx(inst); | ||
498 | ctx->queue = queue; | ||
499 | |||
500 | err = crypto_init_shash_spawn(&ctx->spawn, salg, | ||
501 | ahash_crypto_instance(inst)); | ||
502 | if (err) | ||
503 | goto out_free_inst; | ||
504 | |||
505 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; | ||
506 | |||
507 | inst->alg.halg.digestsize = salg->digestsize; | ||
508 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); | ||
509 | |||
510 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; | ||
511 | inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm; | ||
512 | |||
513 | inst->alg.init = mcryptd_hash_init_enqueue; | ||
514 | inst->alg.update = mcryptd_hash_update_enqueue; | ||
515 | inst->alg.final = mcryptd_hash_final_enqueue; | ||
516 | inst->alg.finup = mcryptd_hash_finup_enqueue; | ||
517 | inst->alg.export = mcryptd_hash_export; | ||
518 | inst->alg.import = mcryptd_hash_import; | ||
519 | inst->alg.setkey = mcryptd_hash_setkey; | ||
520 | inst->alg.digest = mcryptd_hash_digest_enqueue; | ||
521 | |||
522 | err = ahash_register_instance(tmpl, inst); | ||
523 | if (err) { | ||
524 | crypto_drop_shash(&ctx->spawn); | ||
525 | out_free_inst: | ||
526 | kfree(inst); | ||
527 | } | ||
528 | |||
529 | out_put_alg: | ||
530 | crypto_mod_put(alg); | ||
531 | return err; | ||
532 | } | ||
533 | |||
534 | static struct mcryptd_queue mqueue; | ||
535 | |||
536 | static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
537 | { | ||
538 | struct crypto_attr_type *algt; | ||
539 | |||
540 | algt = crypto_get_attr_type(tb); | ||
541 | if (IS_ERR(algt)) | ||
542 | return PTR_ERR(algt); | ||
543 | |||
544 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | ||
545 | case CRYPTO_ALG_TYPE_DIGEST: | ||
546 | return mcryptd_create_hash(tmpl, tb, &mqueue); | ||
547 | break; | ||
548 | } | ||
549 | |||
550 | return -EINVAL; | ||
551 | } | ||
552 | |||
553 | static void mcryptd_free(struct crypto_instance *inst) | ||
554 | { | ||
555 | struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
556 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | ||
557 | |||
558 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | ||
559 | case CRYPTO_ALG_TYPE_AHASH: | ||
560 | crypto_drop_shash(&hctx->spawn); | ||
561 | kfree(ahash_instance(inst)); | ||
562 | return; | ||
563 | default: | ||
564 | crypto_drop_spawn(&ctx->spawn); | ||
565 | kfree(inst); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | static struct crypto_template mcryptd_tmpl = { | ||
570 | .name = "mcryptd", | ||
571 | .create = mcryptd_create, | ||
572 | .free = mcryptd_free, | ||
573 | .module = THIS_MODULE, | ||
574 | }; | ||
575 | |||
576 | struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, | ||
577 | u32 type, u32 mask) | ||
578 | { | ||
579 | char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
580 | struct crypto_ahash *tfm; | ||
581 | |||
582 | if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
583 | "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
584 | return ERR_PTR(-EINVAL); | ||
585 | tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask); | ||
586 | if (IS_ERR(tfm)) | ||
587 | return ERR_CAST(tfm); | ||
588 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
589 | crypto_free_ahash(tfm); | ||
590 | return ERR_PTR(-EINVAL); | ||
591 | } | ||
592 | |||
593 | return __mcryptd_ahash_cast(tfm); | ||
594 | } | ||
595 | EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); | ||
596 | |||
597 | int shash_ahash_mcryptd_digest(struct ahash_request *req, | ||
598 | struct shash_desc *desc) | ||
599 | { | ||
600 | int err; | ||
601 | |||
602 | err = crypto_shash_init(desc) ?: | ||
603 | shash_ahash_mcryptd_finup(req, desc); | ||
604 | |||
605 | return err; | ||
606 | } | ||
607 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest); | ||
608 | |||
609 | int shash_ahash_mcryptd_update(struct ahash_request *req, | ||
610 | struct shash_desc *desc) | ||
611 | { | ||
612 | struct crypto_shash *tfm = desc->tfm; | ||
613 | struct shash_alg *shash = crypto_shash_alg(tfm); | ||
614 | |||
615 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | ||
616 | |||
617 | return shash->update(desc, NULL, 0); | ||
618 | } | ||
619 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update); | ||
620 | |||
621 | int shash_ahash_mcryptd_finup(struct ahash_request *req, | ||
622 | struct shash_desc *desc) | ||
623 | { | ||
624 | struct crypto_shash *tfm = desc->tfm; | ||
625 | struct shash_alg *shash = crypto_shash_alg(tfm); | ||
626 | |||
627 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | ||
628 | |||
629 | return shash->finup(desc, NULL, 0, req->result); | ||
630 | } | ||
631 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup); | ||
632 | |||
633 | int shash_ahash_mcryptd_final(struct ahash_request *req, | ||
634 | struct shash_desc *desc) | ||
635 | { | ||
636 | struct crypto_shash *tfm = desc->tfm; | ||
637 | struct shash_alg *shash = crypto_shash_alg(tfm); | ||
638 | |||
639 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | ||
640 | |||
641 | return shash->final(desc, req->result); | ||
642 | } | ||
643 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final); | ||
644 | |||
645 | struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) | ||
646 | { | ||
647 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | ||
648 | |||
649 | return ctx->child; | ||
650 | } | ||
651 | EXPORT_SYMBOL_GPL(mcryptd_ahash_child); | ||
652 | |||
653 | struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) | ||
654 | { | ||
655 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
656 | return &rctx->desc; | ||
657 | } | ||
658 | EXPORT_SYMBOL_GPL(mcryptd_shash_desc); | ||
659 | |||
660 | void mcryptd_free_ahash(struct mcryptd_ahash *tfm) | ||
661 | { | ||
662 | crypto_free_ahash(&tfm->base); | ||
663 | } | ||
664 | EXPORT_SYMBOL_GPL(mcryptd_free_ahash); | ||
665 | |||
666 | |||
667 | static int __init mcryptd_init(void) | ||
668 | { | ||
669 | int err, cpu; | ||
670 | struct mcryptd_flush_list *flist; | ||
671 | |||
672 | mcryptd_flist = alloc_percpu(struct mcryptd_flush_list); | ||
673 | for_each_possible_cpu(cpu) { | ||
674 | flist = per_cpu_ptr(mcryptd_flist, cpu); | ||
675 | INIT_LIST_HEAD(&flist->list); | ||
676 | mutex_init(&flist->lock); | ||
677 | } | ||
678 | |||
679 | err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN); | ||
680 | if (err) { | ||
681 | free_percpu(mcryptd_flist); | ||
682 | return err; | ||
683 | } | ||
684 | |||
685 | err = crypto_register_template(&mcryptd_tmpl); | ||
686 | if (err) { | ||
687 | mcryptd_fini_queue(&mqueue); | ||
688 | free_percpu(mcryptd_flist); | ||
689 | } | ||
690 | |||
691 | return err; | ||
692 | } | ||
693 | |||
694 | static void __exit mcryptd_exit(void) | ||
695 | { | ||
696 | mcryptd_fini_queue(&mqueue); | ||
697 | crypto_unregister_template(&mcryptd_tmpl); | ||
698 | free_percpu(mcryptd_flist); | ||
699 | } | ||
700 | |||
701 | subsys_initcall(mcryptd_init); | ||
702 | module_exit(mcryptd_exit); | ||
703 | |||
704 | MODULE_LICENSE("GPL"); | ||
705 | MODULE_DESCRIPTION("Software async multibuffer crypto daemon"); | ||
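
A typical consumer of the helpers exported above is the outer, user-visible ahash of a multi-buffer algorithm, which holds an mcryptd handle to its inner shash (the real SHA-1 multi-buffer glue lives under arch/x86 and is outside this crypto/ diffstat). A hedged sketch of just the tfm setup and teardown: "sha1_mb_inner" is a hypothetical inner algorithm name and the sketch_* identifiers are made up, but mcryptd_alloc_ahash(), mcryptd_free_ahash() and the reqsize helpers are the ones defined or used in the file above.

```c
#include <crypto/internal/hash.h>
#include <crypto/mcryptd.h>
#include <linux/err.h>

/* Per-tfm context of the hypothetical outer algorithm. */
struct sketch_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

static int sketch_mb_init_tfm(struct crypto_tfm *tfm)
{
	struct sketch_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_ahash *mc;

	/* resolves to the "mcryptd(sha1_mb_inner)" instance that
	 * mcryptd_create_hash() above builds around the inner shash */
	mc = mcryptd_alloc_ahash("sha1_mb_inner", 0, 0);
	if (IS_ERR(mc))
		return PTR_ERR(mc);

	ctx->mcryptd_tfm = mc;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mc->base));
	return 0;
}

static void sketch_mb_exit_tfm(struct crypto_tfm *tfm)
{
	struct sketch_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
```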
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 543366779524..0bb558344699 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | #include <asm/unaligned.h> | ||
27 | 28 | ||
28 | static inline u32 Ch(u32 x, u32 y, u32 z) | 29 | static inline u32 Ch(u32 x, u32 y, u32 z) |
29 | { | 30 | { |
@@ -42,7 +43,7 @@ static inline u32 Maj(u32 x, u32 y, u32 z) | |||
42 | 43 | ||
43 | static inline void LOAD_OP(int I, u32 *W, const u8 *input) | 44 | static inline void LOAD_OP(int I, u32 *W, const u8 *input) |
44 | { | 45 | { |
45 | W[I] = __be32_to_cpu( ((__be32*)(input))[I] ); | 46 | W[I] = get_unaligned_be32((__u32 *)input + I); |
46 | } | 47 | } |
47 | 48 | ||
48 | static inline void BLEND_OP(int I, u32 *W) | 49 | static inline void BLEND_OP(int I, u32 *W) |
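
The one-line change swaps an aligned-only load for get_unaligned_be32(), so generic SHA-256 no longer relies on 4-byte-aligned input; sha512_generic.c below gets the same treatment with get_unaligned_be64(). A small sketch contrasting the two load styles (illustrative only, the sketch_* names are made up):

```c
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/types.h>

/* Only safe when input is 4-byte aligned; an unaligned dereference can
 * trap or take a slow fixup path on strict-alignment architectures. */
static u32 sketch_load_aligned_only(const u8 *input, int i)
{
	return __be32_to_cpu(((const __be32 *)input)[i]);
}

/* Correct for any alignment; compiles to the same single load on
 * architectures where unaligned access is cheap (e.g. x86). */
static u32 sketch_load_any_alignment(const u8 *input, int i)
{
	return get_unaligned_be32((const u32 *)input + i);
}
```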
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 6ed124f3ea0f..6dde57dc511b 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -20,6 +20,7 @@ | |||
20 | #include <crypto/sha.h> | 20 | #include <crypto/sha.h> |
21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
22 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
23 | #include <asm/unaligned.h> | ||
23 | 24 | ||
24 | static inline u64 Ch(u64 x, u64 y, u64 z) | 25 | static inline u64 Ch(u64 x, u64 y, u64 z) |
25 | { | 26 | { |
@@ -68,7 +69,7 @@ static const u64 sha512_K[80] = { | |||
68 | 69 | ||
69 | static inline void LOAD_OP(int I, u64 *W, const u8 *input) | 70 | static inline void LOAD_OP(int I, u64 *W, const u8 *input) |
70 | { | 71 | { |
71 | W[I] = __be64_to_cpu( ((__be64*)(input))[I] ); | 72 | W[I] = get_unaligned_be64((__u64 *)input + I); |
72 | } | 73 | } |
73 | 74 | ||
74 | static inline void BLEND_OP(int I, u64 *W) | 75 | static inline void BLEND_OP(int I, u64 *W) |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ac2b63105afc..9459dfd7357f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -178,9 +178,7 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
178 | free_page((unsigned long)buf[i]); | 178 | free_page((unsigned long)buf[i]); |
179 | } | 179 | } |
180 | 180 | ||
181 | static int do_one_async_hash_op(struct ahash_request *req, | 181 | static int wait_async_op(struct tcrypt_result *tr, int ret) |
182 | struct tcrypt_result *tr, | ||
183 | int ret) | ||
184 | { | 182 | { |
185 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 183 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
186 | ret = wait_for_completion_interruptible(&tr->completion); | 184 | ret = wait_for_completion_interruptible(&tr->completion); |
@@ -264,30 +262,26 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
264 | 262 | ||
265 | ahash_request_set_crypt(req, sg, result, template[i].psize); | 263 | ahash_request_set_crypt(req, sg, result, template[i].psize); |
266 | if (use_digest) { | 264 | if (use_digest) { |
267 | ret = do_one_async_hash_op(req, &tresult, | 265 | ret = wait_async_op(&tresult, crypto_ahash_digest(req)); |
268 | crypto_ahash_digest(req)); | ||
269 | if (ret) { | 266 | if (ret) { |
270 | pr_err("alg: hash: digest failed on test %d " | 267 | pr_err("alg: hash: digest failed on test %d " |
271 | "for %s: ret=%d\n", j, algo, -ret); | 268 | "for %s: ret=%d\n", j, algo, -ret); |
272 | goto out; | 269 | goto out; |
273 | } | 270 | } |
274 | } else { | 271 | } else { |
275 | ret = do_one_async_hash_op(req, &tresult, | 272 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); |
276 | crypto_ahash_init(req)); | ||
277 | if (ret) { | 273 | if (ret) { |
278 | pr_err("alt: hash: init failed on test %d " | 274 | pr_err("alt: hash: init failed on test %d " |
279 | "for %s: ret=%d\n", j, algo, -ret); | 275 | "for %s: ret=%d\n", j, algo, -ret); |
280 | goto out; | 276 | goto out; |
281 | } | 277 | } |
282 | ret = do_one_async_hash_op(req, &tresult, | 278 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); |
283 | crypto_ahash_update(req)); | ||
284 | if (ret) { | 279 | if (ret) { |
285 | pr_err("alt: hash: update failed on test %d " | 280 | pr_err("alt: hash: update failed on test %d " |
286 | "for %s: ret=%d\n", j, algo, -ret); | 281 | "for %s: ret=%d\n", j, algo, -ret); |
287 | goto out; | 282 | goto out; |
288 | } | 283 | } |
289 | ret = do_one_async_hash_op(req, &tresult, | 284 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); |
290 | crypto_ahash_final(req)); | ||
291 | if (ret) { | 285 | if (ret) { |
292 | pr_err("alt: hash: final failed on test %d " | 286 | pr_err("alt: hash: final failed on test %d " |
293 | "for %s: ret=%d\n", j, algo, -ret); | 287 | "for %s: ret=%d\n", j, algo, -ret); |
@@ -311,78 +305,75 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
311 | if (align_offset != 0) | 305 | if (align_offset != 0) |
312 | break; | 306 | break; |
313 | 307 | ||
314 | if (template[i].np) { | 308 | if (!template[i].np) |
315 | j++; | 309 | continue; |
316 | memset(result, 0, MAX_DIGEST_SIZE); | ||
317 | 310 | ||
318 | temp = 0; | 311 | j++; |
319 | sg_init_table(sg, template[i].np); | 312 | memset(result, 0, MAX_DIGEST_SIZE); |
320 | ret = -EINVAL; | 313 | |
321 | for (k = 0; k < template[i].np; k++) { | 314 | temp = 0; |
322 | if (WARN_ON(offset_in_page(IDX[k]) + | 315 | sg_init_table(sg, template[i].np); |
323 | template[i].tap[k] > PAGE_SIZE)) | 316 | ret = -EINVAL; |
324 | goto out; | 317 | for (k = 0; k < template[i].np; k++) { |
325 | sg_set_buf(&sg[k], | 318 | if (WARN_ON(offset_in_page(IDX[k]) + |
326 | memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + | 319 | template[i].tap[k] > PAGE_SIZE)) |
327 | offset_in_page(IDX[k]), | ||
328 | template[i].plaintext + temp, | ||
329 | template[i].tap[k]), | ||
330 | template[i].tap[k]); | ||
331 | temp += template[i].tap[k]; | ||
332 | } | ||
333 | |||
334 | if (template[i].ksize) { | ||
335 | if (template[i].ksize > MAX_KEYLEN) { | ||
336 | pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", | ||
337 | j, algo, template[i].ksize, | ||
338 | MAX_KEYLEN); | ||
339 | ret = -EINVAL; | ||
340 | goto out; | ||
341 | } | ||
342 | crypto_ahash_clear_flags(tfm, ~0); | ||
343 | memcpy(key, template[i].key, template[i].ksize); | ||
344 | ret = crypto_ahash_setkey(tfm, key, | ||
345 | template[i].ksize); | ||
346 | |||
347 | if (ret) { | ||
348 | printk(KERN_ERR "alg: hash: setkey " | ||
349 | "failed on chunking test %d " | ||
350 | "for %s: ret=%d\n", j, algo, | ||
351 | -ret); | ||
352 | goto out; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | ahash_request_set_crypt(req, sg, result, | ||
357 | template[i].psize); | ||
358 | ret = crypto_ahash_digest(req); | ||
359 | switch (ret) { | ||
360 | case 0: | ||
361 | break; | ||
362 | case -EINPROGRESS: | ||
363 | case -EBUSY: | ||
364 | ret = wait_for_completion_interruptible( | ||
365 | &tresult.completion); | ||
366 | if (!ret && !(ret = tresult.err)) { | ||
367 | reinit_completion(&tresult.completion); | ||
368 | break; | ||
369 | } | ||
370 | /* fall through */ | ||
371 | default: | ||
372 | printk(KERN_ERR "alg: hash: digest failed " | ||
373 | "on chunking test %d for %s: " | ||
374 | "ret=%d\n", j, algo, -ret); | ||
375 | goto out; | 320 | goto out; |
376 | } | 321 | sg_set_buf(&sg[k], |
322 | memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + | ||
323 | offset_in_page(IDX[k]), | ||
324 | template[i].plaintext + temp, | ||
325 | template[i].tap[k]), | ||
326 | template[i].tap[k]); | ||
327 | temp += template[i].tap[k]; | ||
328 | } | ||
377 | 329 | ||
378 | if (memcmp(result, template[i].digest, | 330 | if (template[i].ksize) { |
379 | crypto_ahash_digestsize(tfm))) { | 331 | if (template[i].ksize > MAX_KEYLEN) { |
380 | printk(KERN_ERR "alg: hash: Chunking test %d " | 332 | pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", |
381 | "failed for %s\n", j, algo); | 333 | j, algo, template[i].ksize, MAX_KEYLEN); |
382 | hexdump(result, crypto_ahash_digestsize(tfm)); | ||
383 | ret = -EINVAL; | 334 | ret = -EINVAL; |
384 | goto out; | 335 | goto out; |
385 | } | 336 | } |
337 | crypto_ahash_clear_flags(tfm, ~0); | ||
338 | memcpy(key, template[i].key, template[i].ksize); | ||
339 | ret = crypto_ahash_setkey(tfm, key, template[i].ksize); | ||
340 | |||
341 | if (ret) { | ||
342 | printk(KERN_ERR "alg: hash: setkey " | ||
343 | "failed on chunking test %d " | ||
344 | "for %s: ret=%d\n", j, algo, -ret); | ||
345 | goto out; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | ahash_request_set_crypt(req, sg, result, template[i].psize); | ||
350 | ret = crypto_ahash_digest(req); | ||
351 | switch (ret) { | ||
352 | case 0: | ||
353 | break; | ||
354 | case -EINPROGRESS: | ||
355 | case -EBUSY: | ||
356 | ret = wait_for_completion_interruptible( | ||
357 | &tresult.completion); | ||
358 | if (!ret && !(ret = tresult.err)) { | ||
359 | reinit_completion(&tresult.completion); | ||
360 | break; | ||
361 | } | ||
362 | /* fall through */ | ||
363 | default: | ||
364 | printk(KERN_ERR "alg: hash: digest failed " | ||
365 | "on chunking test %d for %s: " | ||
366 | "ret=%d\n", j, algo, -ret); | ||
367 | goto out; | ||
368 | } | ||
369 | |||
370 | if (memcmp(result, template[i].digest, | ||
371 | crypto_ahash_digestsize(tfm))) { | ||
372 | printk(KERN_ERR "alg: hash: Chunking test %d " | ||
373 | "failed for %s\n", j, algo); | ||
374 | hexdump(result, crypto_ahash_digestsize(tfm)); | ||
375 | ret = -EINVAL; | ||
376 | goto out; | ||
386 | } | 377 | } |
387 | } | 378 | } |
388 | 379 | ||
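Note on the hunk above (and on the AEAD and skcipher hunks that follow): most of the churn is one mechanical refactor, inverting the if (template[i].np) / if (!template[i].np) test into an early continue so the loop body loses a level of indentation without any behavioural change. A minimal standalone sketch of that guard-clause pattern (struct vec and its fields are made up for the example):

    #include <stdio.h>

    struct vec { int np; int value; };

    int main(void)
    {
            struct vec v[] = { { 0, 1 }, { 3, 2 }, { 0, 3 } };

            for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
                    /* Was: if (v[i].np) { ...whole body one level deeper... } */
                    if (!v[i].np)
                            continue;

                    /* Body now sits at the loop's top indentation level. */
                    printf("chunked vector %u: np=%d\n", i, v[i].np);
            }
            return 0;
    }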
@@ -492,121 +483,116 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
492 | tcrypt_complete, &result); | 483 | tcrypt_complete, &result); |
493 | 484 | ||
494 | for (i = 0, j = 0; i < tcount; i++) { | 485 | for (i = 0, j = 0; i < tcount; i++) { |
495 | if (!template[i].np) { | 486 | if (template[i].np) |
496 | j++; | 487 | continue; |
497 | 488 | ||
498 | /* some templates have no input data but they will | 489 | j++; |
499 | * touch input | ||
500 | */ | ||
501 | input = xbuf[0]; | ||
502 | input += align_offset; | ||
503 | assoc = axbuf[0]; | ||
504 | 490 | ||
505 | ret = -EINVAL; | 491 | /* some templates have no input data but they will |
506 | if (WARN_ON(align_offset + template[i].ilen > | 492 | * touch input |
507 | PAGE_SIZE || template[i].alen > PAGE_SIZE)) | 493 | */ |
508 | goto out; | 494 | input = xbuf[0]; |
495 | input += align_offset; | ||
496 | assoc = axbuf[0]; | ||
509 | 497 | ||
510 | memcpy(input, template[i].input, template[i].ilen); | 498 | ret = -EINVAL; |
511 | memcpy(assoc, template[i].assoc, template[i].alen); | 499 | if (WARN_ON(align_offset + template[i].ilen > |
512 | if (template[i].iv) | 500 | PAGE_SIZE || template[i].alen > PAGE_SIZE)) |
513 | memcpy(iv, template[i].iv, MAX_IVLEN); | 501 | goto out; |
514 | else | ||
515 | memset(iv, 0, MAX_IVLEN); | ||
516 | 502 | ||
517 | crypto_aead_clear_flags(tfm, ~0); | 503 | memcpy(input, template[i].input, template[i].ilen); |
518 | if (template[i].wk) | 504 | memcpy(assoc, template[i].assoc, template[i].alen); |
519 | crypto_aead_set_flags( | 505 | if (template[i].iv) |
520 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 506 | memcpy(iv, template[i].iv, MAX_IVLEN); |
507 | else | ||
508 | memset(iv, 0, MAX_IVLEN); | ||
521 | 509 | ||
522 | if (template[i].klen > MAX_KEYLEN) { | 510 | crypto_aead_clear_flags(tfm, ~0); |
523 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", | 511 | if (template[i].wk) |
524 | d, j, algo, template[i].klen, | 512 | crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); |
525 | MAX_KEYLEN); | ||
526 | ret = -EINVAL; | ||
527 | goto out; | ||
528 | } | ||
529 | memcpy(key, template[i].key, template[i].klen); | ||
530 | 513 | ||
531 | ret = crypto_aead_setkey(tfm, key, | 514 | if (template[i].klen > MAX_KEYLEN) { |
532 | template[i].klen); | 515 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", |
533 | if (!ret == template[i].fail) { | 516 | d, j, algo, template[i].klen, |
534 | pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n", | 517 | MAX_KEYLEN); |
535 | d, j, algo, crypto_aead_get_flags(tfm)); | 518 | ret = -EINVAL; |
536 | goto out; | 519 | goto out; |
537 | } else if (ret) | 520 | } |
538 | continue; | 521 | memcpy(key, template[i].key, template[i].klen); |
539 | 522 | ||
540 | authsize = abs(template[i].rlen - template[i].ilen); | 523 | ret = crypto_aead_setkey(tfm, key, template[i].klen); |
541 | ret = crypto_aead_setauthsize(tfm, authsize); | 524 | if (!ret == template[i].fail) { |
542 | if (ret) { | 525 | pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n", |
543 | pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n", | 526 | d, j, algo, crypto_aead_get_flags(tfm)); |
544 | d, authsize, j, algo); | 527 | goto out; |
545 | goto out; | 528 | } else if (ret) |
546 | } | 529 | continue; |
547 | 530 | ||
548 | if (diff_dst) { | 531 | authsize = abs(template[i].rlen - template[i].ilen); |
549 | output = xoutbuf[0]; | 532 | ret = crypto_aead_setauthsize(tfm, authsize); |
550 | output += align_offset; | 533 | if (ret) { |
551 | sg_init_one(&sg[0], input, template[i].ilen); | 534 | pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n", |
552 | sg_init_one(&sgout[0], output, | 535 | d, authsize, j, algo); |
553 | template[i].rlen); | 536 | goto out; |
554 | } else { | 537 | } |
555 | sg_init_one(&sg[0], input, | ||
556 | template[i].ilen + | ||
557 | (enc ? authsize : 0)); | ||
558 | output = input; | ||
559 | } | ||
560 | 538 | ||
561 | sg_init_one(&asg[0], assoc, template[i].alen); | 539 | if (diff_dst) { |
540 | output = xoutbuf[0]; | ||
541 | output += align_offset; | ||
542 | sg_init_one(&sg[0], input, template[i].ilen); | ||
543 | sg_init_one(&sgout[0], output, template[i].rlen); | ||
544 | } else { | ||
545 | sg_init_one(&sg[0], input, | ||
546 | template[i].ilen + (enc ? authsize : 0)); | ||
547 | output = input; | ||
548 | } | ||
562 | 549 | ||
563 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 550 | sg_init_one(&asg[0], assoc, template[i].alen); |
564 | template[i].ilen, iv); | ||
565 | 551 | ||
566 | aead_request_set_assoc(req, asg, template[i].alen); | 552 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
553 | template[i].ilen, iv); | ||
567 | 554 | ||
568 | ret = enc ? | 555 | aead_request_set_assoc(req, asg, template[i].alen); |
569 | crypto_aead_encrypt(req) : | ||
570 | crypto_aead_decrypt(req); | ||
571 | 556 | ||
572 | switch (ret) { | 557 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); |
573 | case 0: | ||
574 | if (template[i].novrfy) { | ||
575 | /* verification was supposed to fail */ | ||
576 | pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n", | ||
577 | d, e, j, algo); | ||
578 | /* so really, we got a bad message */ | ||
579 | ret = -EBADMSG; | ||
580 | goto out; | ||
581 | } | ||
582 | break; | ||
583 | case -EINPROGRESS: | ||
584 | case -EBUSY: | ||
585 | ret = wait_for_completion_interruptible( | ||
586 | &result.completion); | ||
587 | if (!ret && !(ret = result.err)) { | ||
588 | reinit_completion(&result.completion); | ||
589 | break; | ||
590 | } | ||
591 | case -EBADMSG: | ||
592 | if (template[i].novrfy) | ||
593 | /* verification failure was expected */ | ||
594 | continue; | ||
595 | /* fall through */ | ||
596 | default: | ||
597 | pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n", | ||
598 | d, e, j, algo, -ret); | ||
599 | goto out; | ||
600 | } | ||
601 | 558 | ||
602 | q = output; | 559 | switch (ret) { |
603 | if (memcmp(q, template[i].result, template[i].rlen)) { | 560 | case 0: |
604 | pr_err("alg: aead%s: Test %d failed on %s for %s\n", | 561 | if (template[i].novrfy) { |
605 | d, j, e, algo); | 562 | /* verification was supposed to fail */ |
606 | hexdump(q, template[i].rlen); | 563 | pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n", |
607 | ret = -EINVAL; | 564 | d, e, j, algo); |
565 | /* so really, we got a bad message */ | ||
566 | ret = -EBADMSG; | ||
608 | goto out; | 567 | goto out; |
609 | } | 568 | } |
569 | break; | ||
570 | case -EINPROGRESS: | ||
571 | case -EBUSY: | ||
572 | ret = wait_for_completion_interruptible( | ||
573 | &result.completion); | ||
574 | if (!ret && !(ret = result.err)) { | ||
575 | reinit_completion(&result.completion); | ||
576 | break; | ||
577 | } | ||
578 | case -EBADMSG: | ||
579 | if (template[i].novrfy) | ||
580 | /* verification failure was expected */ | ||
581 | continue; | ||
582 | /* fall through */ | ||
583 | default: | ||
584 | pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n", | ||
585 | d, e, j, algo, -ret); | ||
586 | goto out; | ||
587 | } | ||
588 | |||
589 | q = output; | ||
590 | if (memcmp(q, template[i].result, template[i].rlen)) { | ||
591 | pr_err("alg: aead%s: Test %d failed on %s for %s\n", | ||
592 | d, j, e, algo); | ||
593 | hexdump(q, template[i].rlen); | ||
594 | ret = -EINVAL; | ||
595 | goto out; | ||
610 | } | 596 | } |
611 | } | 597 | } |
612 | 598 | ||
@@ -615,191 +601,182 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
615 | if (align_offset != 0) | 601 | if (align_offset != 0) |
616 | break; | 602 | break; |
617 | 603 | ||
618 | if (template[i].np) { | 604 | if (!template[i].np) |
619 | j++; | 605 | continue; |
620 | |||
621 | if (template[i].iv) | ||
622 | memcpy(iv, template[i].iv, MAX_IVLEN); | ||
623 | else | ||
624 | memset(iv, 0, MAX_IVLEN); | ||
625 | |||
626 | crypto_aead_clear_flags(tfm, ~0); | ||
627 | if (template[i].wk) | ||
628 | crypto_aead_set_flags( | ||
629 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | ||
630 | if (template[i].klen > MAX_KEYLEN) { | ||
631 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", | ||
632 | d, j, algo, template[i].klen, | ||
633 | MAX_KEYLEN); | ||
634 | ret = -EINVAL; | ||
635 | goto out; | ||
636 | } | ||
637 | memcpy(key, template[i].key, template[i].klen); | ||
638 | 606 | ||
639 | ret = crypto_aead_setkey(tfm, key, template[i].klen); | 607 | j++; |
640 | if (!ret == template[i].fail) { | ||
641 | pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n", | ||
642 | d, j, algo, crypto_aead_get_flags(tfm)); | ||
643 | goto out; | ||
644 | } else if (ret) | ||
645 | continue; | ||
646 | 608 | ||
647 | authsize = abs(template[i].rlen - template[i].ilen); | 609 | if (template[i].iv) |
610 | memcpy(iv, template[i].iv, MAX_IVLEN); | ||
611 | else | ||
612 | memset(iv, 0, MAX_IVLEN); | ||
648 | 613 | ||
614 | crypto_aead_clear_flags(tfm, ~0); | ||
615 | if (template[i].wk) | ||
616 | crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); | ||
617 | if (template[i].klen > MAX_KEYLEN) { | ||
618 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", | ||
619 | d, j, algo, template[i].klen, MAX_KEYLEN); | ||
649 | ret = -EINVAL; | 620 | ret = -EINVAL; |
650 | sg_init_table(sg, template[i].np); | 621 | goto out; |
651 | if (diff_dst) | 622 | } |
652 | sg_init_table(sgout, template[i].np); | 623 | memcpy(key, template[i].key, template[i].klen); |
653 | for (k = 0, temp = 0; k < template[i].np; k++) { | ||
654 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
655 | template[i].tap[k] > PAGE_SIZE)) | ||
656 | goto out; | ||
657 | |||
658 | q = xbuf[IDX[k] >> PAGE_SHIFT] + | ||
659 | offset_in_page(IDX[k]); | ||
660 | 624 | ||
661 | memcpy(q, template[i].input + temp, | 625 | ret = crypto_aead_setkey(tfm, key, template[i].klen); |
662 | template[i].tap[k]); | 626 | if (!ret == template[i].fail) { |
627 | pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n", | ||
628 | d, j, algo, crypto_aead_get_flags(tfm)); | ||
629 | goto out; | ||
630 | } else if (ret) | ||
631 | continue; | ||
663 | 632 | ||
664 | sg_set_buf(&sg[k], q, template[i].tap[k]); | 633 | authsize = abs(template[i].rlen - template[i].ilen); |
665 | 634 | ||
666 | if (diff_dst) { | 635 | ret = -EINVAL; |
667 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | 636 | sg_init_table(sg, template[i].np); |
668 | offset_in_page(IDX[k]); | 637 | if (diff_dst) |
638 | sg_init_table(sgout, template[i].np); | ||
639 | for (k = 0, temp = 0; k < template[i].np; k++) { | ||
640 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
641 | template[i].tap[k] > PAGE_SIZE)) | ||
642 | goto out; | ||
669 | 643 | ||
670 | memset(q, 0, template[i].tap[k]); | 644 | q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); |
645 | memcpy(q, template[i].input + temp, template[i].tap[k]); | ||
646 | sg_set_buf(&sg[k], q, template[i].tap[k]); | ||
671 | 647 | ||
672 | sg_set_buf(&sgout[k], q, | 648 | if (diff_dst) { |
673 | template[i].tap[k]); | 649 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + |
674 | } | 650 | offset_in_page(IDX[k]); |
675 | 651 | ||
676 | n = template[i].tap[k]; | 652 | memset(q, 0, template[i].tap[k]); |
677 | if (k == template[i].np - 1 && enc) | ||
678 | n += authsize; | ||
679 | if (offset_in_page(q) + n < PAGE_SIZE) | ||
680 | q[n] = 0; | ||
681 | 653 | ||
682 | temp += template[i].tap[k]; | 654 | sg_set_buf(&sgout[k], q, template[i].tap[k]); |
683 | } | 655 | } |
684 | 656 | ||
685 | ret = crypto_aead_setauthsize(tfm, authsize); | 657 | n = template[i].tap[k]; |
686 | if (ret) { | 658 | if (k == template[i].np - 1 && enc) |
687 | pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n", | 659 | n += authsize; |
688 | d, authsize, j, algo); | 660 | if (offset_in_page(q) + n < PAGE_SIZE) |
661 | q[n] = 0; | ||
662 | |||
663 | temp += template[i].tap[k]; | ||
664 | } | ||
665 | |||
666 | ret = crypto_aead_setauthsize(tfm, authsize); | ||
667 | if (ret) { | ||
668 | pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n", | ||
669 | d, authsize, j, algo); | ||
670 | goto out; | ||
671 | } | ||
672 | |||
673 | if (enc) { | ||
674 | if (WARN_ON(sg[k - 1].offset + | ||
675 | sg[k - 1].length + authsize > | ||
676 | PAGE_SIZE)) { | ||
677 | ret = -EINVAL; | ||
689 | goto out; | 678 | goto out; |
690 | } | 679 | } |
691 | 680 | ||
692 | if (enc) { | 681 | if (diff_dst) |
693 | if (WARN_ON(sg[k - 1].offset + | 682 | sgout[k - 1].length += authsize; |
694 | sg[k - 1].length + authsize > | 683 | else |
695 | PAGE_SIZE)) { | 684 | sg[k - 1].length += authsize; |
696 | ret = -EINVAL; | 685 | } |
697 | goto out; | ||
698 | } | ||
699 | 686 | ||
700 | if (diff_dst) | 687 | sg_init_table(asg, template[i].anp); |
701 | sgout[k - 1].length += authsize; | 688 | ret = -EINVAL; |
702 | else | 689 | for (k = 0, temp = 0; k < template[i].anp; k++) { |
703 | sg[k - 1].length += authsize; | 690 | if (WARN_ON(offset_in_page(IDX[k]) + |
691 | template[i].atap[k] > PAGE_SIZE)) | ||
692 | goto out; | ||
693 | sg_set_buf(&asg[k], | ||
694 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + | ||
695 | offset_in_page(IDX[k]), | ||
696 | template[i].assoc + temp, | ||
697 | template[i].atap[k]), | ||
698 | template[i].atap[k]); | ||
699 | temp += template[i].atap[k]; | ||
700 | } | ||
701 | |||
702 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | ||
703 | template[i].ilen, | ||
704 | iv); | ||
705 | |||
706 | aead_request_set_assoc(req, asg, template[i].alen); | ||
707 | |||
708 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | ||
709 | |||
710 | switch (ret) { | ||
711 | case 0: | ||
712 | if (template[i].novrfy) { | ||
713 | /* verification was supposed to fail */ | ||
714 | pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n", | ||
715 | d, e, j, algo); | ||
716 | /* so really, we got a bad message */ | ||
717 | ret = -EBADMSG; | ||
718 | goto out; | ||
704 | } | 719 | } |
705 | 720 | break; | |
706 | sg_init_table(asg, template[i].anp); | 721 | case -EINPROGRESS: |
707 | ret = -EINVAL; | 722 | case -EBUSY: |
708 | for (k = 0, temp = 0; k < template[i].anp; k++) { | 723 | ret = wait_for_completion_interruptible( |
709 | if (WARN_ON(offset_in_page(IDX[k]) + | 724 | &result.completion); |
710 | template[i].atap[k] > PAGE_SIZE)) | 725 | if (!ret && !(ret = result.err)) { |
711 | goto out; | 726 | reinit_completion(&result.completion); |
712 | sg_set_buf(&asg[k], | ||
713 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + | ||
714 | offset_in_page(IDX[k]), | ||
715 | template[i].assoc + temp, | ||
716 | template[i].atap[k]), | ||
717 | template[i].atap[k]); | ||
718 | temp += template[i].atap[k]; | ||
719 | } | ||
720 | |||
721 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | ||
722 | template[i].ilen, | ||
723 | iv); | ||
724 | |||
725 | aead_request_set_assoc(req, asg, template[i].alen); | ||
726 | |||
727 | ret = enc ? | ||
728 | crypto_aead_encrypt(req) : | ||
729 | crypto_aead_decrypt(req); | ||
730 | |||
731 | switch (ret) { | ||
732 | case 0: | ||
733 | if (template[i].novrfy) { | ||
734 | /* verification was supposed to fail */ | ||
735 | pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n", | ||
736 | d, e, j, algo); | ||
737 | /* so really, we got a bad message */ | ||
738 | ret = -EBADMSG; | ||
739 | goto out; | ||
740 | } | ||
741 | break; | 727 | break; |
742 | case -EINPROGRESS: | ||
743 | case -EBUSY: | ||
744 | ret = wait_for_completion_interruptible( | ||
745 | &result.completion); | ||
746 | if (!ret && !(ret = result.err)) { | ||
747 | reinit_completion(&result.completion); | ||
748 | break; | ||
749 | } | ||
750 | case -EBADMSG: | ||
751 | if (template[i].novrfy) | ||
752 | /* verification failure was expected */ | ||
753 | continue; | ||
754 | /* fall through */ | ||
755 | default: | ||
756 | pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n", | ||
757 | d, e, j, algo, -ret); | ||
758 | goto out; | ||
759 | } | 728 | } |
729 | case -EBADMSG: | ||
730 | if (template[i].novrfy) | ||
731 | /* verification failure was expected */ | ||
732 | continue; | ||
733 | /* fall through */ | ||
734 | default: | ||
735 | pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n", | ||
736 | d, e, j, algo, -ret); | ||
737 | goto out; | ||
738 | } | ||
760 | 739 | ||
761 | ret = -EINVAL; | 740 | ret = -EINVAL; |
762 | for (k = 0, temp = 0; k < template[i].np; k++) { | 741 | for (k = 0, temp = 0; k < template[i].np; k++) { |
763 | if (diff_dst) | 742 | if (diff_dst) |
764 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | 743 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + |
765 | offset_in_page(IDX[k]); | 744 | offset_in_page(IDX[k]); |
766 | else | 745 | else |
767 | q = xbuf[IDX[k] >> PAGE_SHIFT] + | 746 | q = xbuf[IDX[k] >> PAGE_SHIFT] + |
768 | offset_in_page(IDX[k]); | 747 | offset_in_page(IDX[k]); |
769 | |||
770 | n = template[i].tap[k]; | ||
771 | if (k == template[i].np - 1) | ||
772 | n += enc ? authsize : -authsize; | ||
773 | 748 | ||
774 | if (memcmp(q, template[i].result + temp, n)) { | 749 | n = template[i].tap[k]; |
775 | pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n", | 750 | if (k == template[i].np - 1) |
776 | d, j, e, k, algo); | 751 | n += enc ? authsize : -authsize; |
777 | hexdump(q, n); | ||
778 | goto out; | ||
779 | } | ||
780 | 752 | ||
781 | q += n; | 753 | if (memcmp(q, template[i].result + temp, n)) { |
782 | if (k == template[i].np - 1 && !enc) { | 754 | pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n", |
783 | if (!diff_dst && | 755 | d, j, e, k, algo); |
784 | memcmp(q, template[i].input + | 756 | hexdump(q, n); |
785 | temp + n, authsize)) | 757 | goto out; |
786 | n = authsize; | 758 | } |
787 | else | ||
788 | n = 0; | ||
789 | } else { | ||
790 | for (n = 0; offset_in_page(q + n) && | ||
791 | q[n]; n++) | ||
792 | ; | ||
793 | } | ||
794 | if (n) { | ||
795 | pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", | ||
796 | d, j, e, k, algo, n); | ||
797 | hexdump(q, n); | ||
798 | goto out; | ||
799 | } | ||
800 | 759 | ||
801 | temp += template[i].tap[k]; | 760 | q += n; |
761 | if (k == template[i].np - 1 && !enc) { | ||
762 | if (!diff_dst && | ||
763 | memcmp(q, template[i].input + | ||
764 | temp + n, authsize)) | ||
765 | n = authsize; | ||
766 | else | ||
767 | n = 0; | ||
768 | } else { | ||
769 | for (n = 0; offset_in_page(q + n) && q[n]; n++) | ||
770 | ; | ||
771 | } | ||
772 | if (n) { | ||
773 | pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", | ||
774 | d, j, e, k, algo, n); | ||
775 | hexdump(q, n); | ||
776 | goto out; | ||
802 | } | 777 | } |
778 | |||
779 | temp += template[i].tap[k]; | ||
803 | } | 780 | } |
804 | } | 781 | } |
805 | 782 | ||
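Note on the chunked ("np") tests above: each vector is scattered across several page buffers according to template[i].tap[] (per-chunk byte counts, np chunks in total), a scatterlist is built over the pieces, and after the operation both the result and the guard bytes behind each chunk are checked for corruption. A tiny userspace sketch of the splitting step only (hypothetical names, no scatterlists):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *input = "Join us now and share the software ";
            const int tap[] = { 10, 16, 10 };   /* chunk sizes; sum == strlen(input) */
            char chunk[3][32];
            int temp = 0;

            for (int k = 0; k < 3; k++) {
                    /* Copy the k-th slice of the input into its own buffer,
                     * the same way __test_hash()/__test_aead() copy into
                     * separate pages before building the scatterlist. */
                    memcpy(chunk[k], input + temp, tap[k]);
                    chunk[k][tap[k]] = '\0';
                    printf("chunk %d (%d bytes): \"%s\"\n", k, tap[k], chunk[k]);
                    temp += tap[k];
            }
            return 0;
    }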
@@ -978,78 +955,73 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
978 | 955 | ||
979 | j = 0; | 956 | j = 0; |
980 | for (i = 0; i < tcount; i++) { | 957 | for (i = 0; i < tcount; i++) { |
958 | if (template[i].np && !template[i].also_non_np) | ||
959 | continue; | ||
960 | |||
981 | if (template[i].iv) | 961 | if (template[i].iv) |
982 | memcpy(iv, template[i].iv, MAX_IVLEN); | 962 | memcpy(iv, template[i].iv, MAX_IVLEN); |
983 | else | 963 | else |
984 | memset(iv, 0, MAX_IVLEN); | 964 | memset(iv, 0, MAX_IVLEN); |
985 | 965 | ||
986 | if (!(template[i].np) || (template[i].also_non_np)) { | 966 | j++; |
987 | j++; | 967 | ret = -EINVAL; |
968 | if (WARN_ON(align_offset + template[i].ilen > PAGE_SIZE)) | ||
969 | goto out; | ||
988 | 970 | ||
989 | ret = -EINVAL; | 971 | data = xbuf[0]; |
990 | if (WARN_ON(align_offset + template[i].ilen > | 972 | data += align_offset; |
991 | PAGE_SIZE)) | 973 | memcpy(data, template[i].input, template[i].ilen); |
992 | goto out; | ||
993 | 974 | ||
994 | data = xbuf[0]; | 975 | crypto_ablkcipher_clear_flags(tfm, ~0); |
995 | data += align_offset; | 976 | if (template[i].wk) |
996 | memcpy(data, template[i].input, template[i].ilen); | 977 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); |
997 | |||
998 | crypto_ablkcipher_clear_flags(tfm, ~0); | ||
999 | if (template[i].wk) | ||
1000 | crypto_ablkcipher_set_flags( | ||
1001 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | ||
1002 | |||
1003 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, | ||
1004 | template[i].klen); | ||
1005 | if (!ret == template[i].fail) { | ||
1006 | pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", | ||
1007 | d, j, algo, | ||
1008 | crypto_ablkcipher_get_flags(tfm)); | ||
1009 | goto out; | ||
1010 | } else if (ret) | ||
1011 | continue; | ||
1012 | 978 | ||
1013 | sg_init_one(&sg[0], data, template[i].ilen); | 979 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, |
1014 | if (diff_dst) { | 980 | template[i].klen); |
1015 | data = xoutbuf[0]; | 981 | if (!ret == template[i].fail) { |
1016 | data += align_offset; | 982 | pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", |
1017 | sg_init_one(&sgout[0], data, template[i].ilen); | 983 | d, j, algo, crypto_ablkcipher_get_flags(tfm)); |
1018 | } | 984 | goto out; |
985 | } else if (ret) | ||
986 | continue; | ||
987 | |||
988 | sg_init_one(&sg[0], data, template[i].ilen); | ||
989 | if (diff_dst) { | ||
990 | data = xoutbuf[0]; | ||
991 | data += align_offset; | ||
992 | sg_init_one(&sgout[0], data, template[i].ilen); | ||
993 | } | ||
1019 | 994 | ||
1020 | ablkcipher_request_set_crypt(req, sg, | 995 | ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
1021 | (diff_dst) ? sgout : sg, | 996 | template[i].ilen, iv); |
1022 | template[i].ilen, iv); | 997 | ret = enc ? crypto_ablkcipher_encrypt(req) : |
1023 | ret = enc ? | 998 | crypto_ablkcipher_decrypt(req); |
1024 | crypto_ablkcipher_encrypt(req) : | ||
1025 | crypto_ablkcipher_decrypt(req); | ||
1026 | 999 | ||
1027 | switch (ret) { | 1000 | switch (ret) { |
1028 | case 0: | 1001 | case 0: |
1002 | break; | ||
1003 | case -EINPROGRESS: | ||
1004 | case -EBUSY: | ||
1005 | ret = wait_for_completion_interruptible( | ||
1006 | &result.completion); | ||
1007 | if (!ret && !((ret = result.err))) { | ||
1008 | reinit_completion(&result.completion); | ||
1029 | break; | 1009 | break; |
1030 | case -EINPROGRESS: | ||
1031 | case -EBUSY: | ||
1032 | ret = wait_for_completion_interruptible( | ||
1033 | &result.completion); | ||
1034 | if (!ret && !((ret = result.err))) { | ||
1035 | reinit_completion(&result.completion); | ||
1036 | break; | ||
1037 | } | ||
1038 | /* fall through */ | ||
1039 | default: | ||
1040 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", | ||
1041 | d, e, j, algo, -ret); | ||
1042 | goto out; | ||
1043 | } | 1010 | } |
1011 | /* fall through */ | ||
1012 | default: | ||
1013 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", | ||
1014 | d, e, j, algo, -ret); | ||
1015 | goto out; | ||
1016 | } | ||
1044 | 1017 | ||
1045 | q = data; | 1018 | q = data; |
1046 | if (memcmp(q, template[i].result, template[i].rlen)) { | 1019 | if (memcmp(q, template[i].result, template[i].rlen)) { |
1047 | pr_err("alg: skcipher%s: Test %d failed on %s for %s\n", | 1020 | pr_err("alg: skcipher%s: Test %d failed on %s for %s\n", |
1048 | d, j, e, algo); | 1021 | d, j, e, algo); |
1049 | hexdump(q, template[i].rlen); | 1022 | hexdump(q, template[i].rlen); |
1050 | ret = -EINVAL; | 1023 | ret = -EINVAL; |
1051 | goto out; | 1024 | goto out; |
1052 | } | ||
1053 | } | 1025 | } |
1054 | } | 1026 | } |
1055 | 1027 | ||
@@ -1059,121 +1031,113 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
1059 | if (align_offset != 0) | 1031 | if (align_offset != 0) |
1060 | break; | 1032 | break; |
1061 | 1033 | ||
1034 | if (!template[i].np) | ||
1035 | continue; | ||
1036 | |||
1062 | if (template[i].iv) | 1037 | if (template[i].iv) |
1063 | memcpy(iv, template[i].iv, MAX_IVLEN); | 1038 | memcpy(iv, template[i].iv, MAX_IVLEN); |
1064 | else | 1039 | else |
1065 | memset(iv, 0, MAX_IVLEN); | 1040 | memset(iv, 0, MAX_IVLEN); |
1066 | 1041 | ||
1067 | if (template[i].np) { | 1042 | j++; |
1068 | j++; | 1043 | crypto_ablkcipher_clear_flags(tfm, ~0); |
1044 | if (template[i].wk) | ||
1045 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); | ||
1069 | 1046 | ||
1070 | crypto_ablkcipher_clear_flags(tfm, ~0); | 1047 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, |
1071 | if (template[i].wk) | 1048 | template[i].klen); |
1072 | crypto_ablkcipher_set_flags( | 1049 | if (!ret == template[i].fail) { |
1073 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 1050 | pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", |
1051 | d, j, algo, crypto_ablkcipher_get_flags(tfm)); | ||
1052 | goto out; | ||
1053 | } else if (ret) | ||
1054 | continue; | ||
1074 | 1055 | ||
1075 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, | 1056 | temp = 0; |
1076 | template[i].klen); | 1057 | ret = -EINVAL; |
1077 | if (!ret == template[i].fail) { | 1058 | sg_init_table(sg, template[i].np); |
1078 | pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", | 1059 | if (diff_dst) |
1079 | d, j, algo, | 1060 | sg_init_table(sgout, template[i].np); |
1080 | crypto_ablkcipher_get_flags(tfm)); | 1061 | for (k = 0; k < template[i].np; k++) { |
1062 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
1063 | template[i].tap[k] > PAGE_SIZE)) | ||
1081 | goto out; | 1064 | goto out; |
1082 | } else if (ret) | ||
1083 | continue; | ||
1084 | 1065 | ||
1085 | temp = 0; | 1066 | q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); |
1086 | ret = -EINVAL; | ||
1087 | sg_init_table(sg, template[i].np); | ||
1088 | if (diff_dst) | ||
1089 | sg_init_table(sgout, template[i].np); | ||
1090 | for (k = 0; k < template[i].np; k++) { | ||
1091 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
1092 | template[i].tap[k] > PAGE_SIZE)) | ||
1093 | goto out; | ||
1094 | 1067 | ||
1095 | q = xbuf[IDX[k] >> PAGE_SHIFT] + | 1068 | memcpy(q, template[i].input + temp, template[i].tap[k]); |
1069 | |||
1070 | if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE) | ||
1071 | q[template[i].tap[k]] = 0; | ||
1072 | |||
1073 | sg_set_buf(&sg[k], q, template[i].tap[k]); | ||
1074 | if (diff_dst) { | ||
1075 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | ||
1096 | offset_in_page(IDX[k]); | 1076 | offset_in_page(IDX[k]); |
1097 | 1077 | ||
1098 | memcpy(q, template[i].input + temp, | 1078 | sg_set_buf(&sgout[k], q, template[i].tap[k]); |
1099 | template[i].tap[k]); | ||
1100 | 1079 | ||
1101 | if (offset_in_page(q) + template[i].tap[k] < | 1080 | memset(q, 0, template[i].tap[k]); |
1102 | PAGE_SIZE) | 1081 | if (offset_in_page(q) + |
1082 | template[i].tap[k] < PAGE_SIZE) | ||
1103 | q[template[i].tap[k]] = 0; | 1083 | q[template[i].tap[k]] = 0; |
1084 | } | ||
1104 | 1085 | ||
1105 | sg_set_buf(&sg[k], q, template[i].tap[k]); | 1086 | temp += template[i].tap[k]; |
1106 | if (diff_dst) { | 1087 | } |
1107 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | ||
1108 | offset_in_page(IDX[k]); | ||
1109 | 1088 | ||
1110 | sg_set_buf(&sgout[k], q, | 1089 | ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
1111 | template[i].tap[k]); | 1090 | template[i].ilen, iv); |
1112 | 1091 | ||
1113 | memset(q, 0, template[i].tap[k]); | 1092 | ret = enc ? crypto_ablkcipher_encrypt(req) : |
1114 | if (offset_in_page(q) + | 1093 | crypto_ablkcipher_decrypt(req); |
1115 | template[i].tap[k] < PAGE_SIZE) | ||
1116 | q[template[i].tap[k]] = 0; | ||
1117 | } | ||
1118 | 1094 | ||
1119 | temp += template[i].tap[k]; | 1095 | switch (ret) { |
1096 | case 0: | ||
1097 | break; | ||
1098 | case -EINPROGRESS: | ||
1099 | case -EBUSY: | ||
1100 | ret = wait_for_completion_interruptible( | ||
1101 | &result.completion); | ||
1102 | if (!ret && !((ret = result.err))) { | ||
1103 | reinit_completion(&result.completion); | ||
1104 | break; | ||
1120 | } | 1105 | } |
1106 | /* fall through */ | ||
1107 | default: | ||
1108 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", | ||
1109 | d, e, j, algo, -ret); | ||
1110 | goto out; | ||
1111 | } | ||
1121 | 1112 | ||
1122 | ablkcipher_request_set_crypt(req, sg, | 1113 | temp = 0; |
1123 | (diff_dst) ? sgout : sg, | 1114 | ret = -EINVAL; |
1124 | template[i].ilen, iv); | 1115 | for (k = 0; k < template[i].np; k++) { |
1125 | 1116 | if (diff_dst) | |
1126 | ret = enc ? | 1117 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + |
1127 | crypto_ablkcipher_encrypt(req) : | 1118 | offset_in_page(IDX[k]); |
1128 | crypto_ablkcipher_decrypt(req); | 1119 | else |
1120 | q = xbuf[IDX[k] >> PAGE_SHIFT] + | ||
1121 | offset_in_page(IDX[k]); | ||
1129 | 1122 | ||
1130 | switch (ret) { | 1123 | if (memcmp(q, template[i].result + temp, |
1131 | case 0: | 1124 | template[i].tap[k])) { |
1132 | break; | 1125 | pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n", |
1133 | case -EINPROGRESS: | 1126 | d, j, e, k, algo); |
1134 | case -EBUSY: | 1127 | hexdump(q, template[i].tap[k]); |
1135 | ret = wait_for_completion_interruptible( | ||
1136 | &result.completion); | ||
1137 | if (!ret && !((ret = result.err))) { | ||
1138 | reinit_completion(&result.completion); | ||
1139 | break; | ||
1140 | } | ||
1141 | /* fall through */ | ||
1142 | default: | ||
1143 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", | ||
1144 | d, e, j, algo, -ret); | ||
1145 | goto out; | 1128 | goto out; |
1146 | } | 1129 | } |
1147 | 1130 | ||
1148 | temp = 0; | 1131 | q += template[i].tap[k]; |
1149 | ret = -EINVAL; | 1132 | for (n = 0; offset_in_page(q + n) && q[n]; n++) |
1150 | for (k = 0; k < template[i].np; k++) { | 1133 | ; |
1151 | if (diff_dst) | 1134 | if (n) { |
1152 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | 1135 | pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", |
1153 | offset_in_page(IDX[k]); | 1136 | d, j, e, k, algo, n); |
1154 | else | 1137 | hexdump(q, n); |
1155 | q = xbuf[IDX[k] >> PAGE_SHIFT] + | 1138 | goto out; |
1156 | offset_in_page(IDX[k]); | ||
1157 | |||
1158 | if (memcmp(q, template[i].result + temp, | ||
1159 | template[i].tap[k])) { | ||
1160 | pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n", | ||
1161 | d, j, e, k, algo); | ||
1162 | hexdump(q, template[i].tap[k]); | ||
1163 | goto out; | ||
1164 | } | ||
1165 | |||
1166 | q += template[i].tap[k]; | ||
1167 | for (n = 0; offset_in_page(q + n) && q[n]; n++) | ||
1168 | ; | ||
1169 | if (n) { | ||
1170 | pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", | ||
1171 | d, j, e, k, algo, n); | ||
1172 | hexdump(q, n); | ||
1173 | goto out; | ||
1174 | } | ||
1175 | temp += template[i].tap[k]; | ||
1176 | } | 1139 | } |
1140 | temp += template[i].tap[k]; | ||
1177 | } | 1141 | } |
1178 | } | 1142 | } |
1179 | 1143 | ||
@@ -3213,6 +3177,38 @@ static const struct alg_test_desc alg_test_descs[] = {
3213 | } | 3177 | } |
3214 | } | 3178 | } |
3215 | }, { | 3179 | }, { |
3180 | .alg = "lz4", | ||
3181 | .test = alg_test_comp, | ||
3182 | .fips_allowed = 1, | ||
3183 | .suite = { | ||
3184 | .comp = { | ||
3185 | .comp = { | ||
3186 | .vecs = lz4_comp_tv_template, | ||
3187 | .count = LZ4_COMP_TEST_VECTORS | ||
3188 | }, | ||
3189 | .decomp = { | ||
3190 | .vecs = lz4_decomp_tv_template, | ||
3191 | .count = LZ4_DECOMP_TEST_VECTORS | ||
3192 | } | ||
3193 | } | ||
3194 | } | ||
3195 | }, { | ||
3196 | .alg = "lz4hc", | ||
3197 | .test = alg_test_comp, | ||
3198 | .fips_allowed = 1, | ||
3199 | .suite = { | ||
3200 | .comp = { | ||
3201 | .comp = { | ||
3202 | .vecs = lz4hc_comp_tv_template, | ||
3203 | .count = LZ4HC_COMP_TEST_VECTORS | ||
3204 | }, | ||
3205 | .decomp = { | ||
3206 | .vecs = lz4hc_decomp_tv_template, | ||
3207 | .count = LZ4HC_DECOMP_TEST_VECTORS | ||
3208 | } | ||
3209 | } | ||
3210 | } | ||
3211 | }, { | ||
3216 | .alg = "lzo", | 3212 | .alg = "lzo", |
3217 | .test = alg_test_comp, | 3213 | .test = alg_test_comp, |
3218 | .fips_allowed = 1, | 3214 | .fips_allowed = 1, |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6597203eccfa..62e2485bb428 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -29473,4 +29473,70 @@ static struct hash_testvec bfin_crc_tv_template[] = {
29473 | 29473 | ||
29474 | }; | 29474 | }; |
29475 | 29475 | ||
29476 | #define LZ4_COMP_TEST_VECTORS 1 | ||
29477 | #define LZ4_DECOMP_TEST_VECTORS 1 | ||
29478 | |||
29479 | static struct comp_testvec lz4_comp_tv_template[] = { | ||
29480 | { | ||
29481 | .inlen = 70, | ||
29482 | .outlen = 45, | ||
29483 | .input = "Join us now and share the software " | ||
29484 | "Join us now and share the software ", | ||
29485 | .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | ||
29486 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | ||
29487 | "\x64\x20\x73\x68\x61\x72\x65\x20" | ||
29488 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | ||
29489 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | ||
29490 | "\x77\x61\x72\x65\x20", | ||
29491 | }, | ||
29492 | }; | ||
29493 | |||
29494 | static struct comp_testvec lz4_decomp_tv_template[] = { | ||
29495 | { | ||
29496 | .inlen = 45, | ||
29497 | .outlen = 70, | ||
29498 | .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | ||
29499 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | ||
29500 | "\x64\x20\x73\x68\x61\x72\x65\x20" | ||
29501 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | ||
29502 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | ||
29503 | "\x77\x61\x72\x65\x20", | ||
29504 | .output = "Join us now and share the software " | ||
29505 | "Join us now and share the software ", | ||
29506 | }, | ||
29507 | }; | ||
29508 | |||
29509 | #define LZ4HC_COMP_TEST_VECTORS 1 | ||
29510 | #define LZ4HC_DECOMP_TEST_VECTORS 1 | ||
29511 | |||
29512 | static struct comp_testvec lz4hc_comp_tv_template[] = { | ||
29513 | { | ||
29514 | .inlen = 70, | ||
29515 | .outlen = 45, | ||
29516 | .input = "Join us now and share the software " | ||
29517 | "Join us now and share the software ", | ||
29518 | .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | ||
29519 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | ||
29520 | "\x64\x20\x73\x68\x61\x72\x65\x20" | ||
29521 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | ||
29522 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | ||
29523 | "\x77\x61\x72\x65\x20", | ||
29524 | }, | ||
29525 | }; | ||
29526 | |||
29527 | static struct comp_testvec lz4hc_decomp_tv_template[] = { | ||
29528 | { | ||
29529 | .inlen = 45, | ||
29530 | .outlen = 70, | ||
29531 | .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | ||
29532 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | ||
29533 | "\x64\x20\x73\x68\x61\x72\x65\x20" | ||
29534 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | ||
29535 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | ||
29536 | "\x77\x61\x72\x65\x20", | ||
29537 | .output = "Join us now and share the software " | ||
29538 | "Join us now and share the software ", | ||
29539 | }, | ||
29540 | }; | ||
29541 | |||
29476 | #endif /* _CRYPTO_TESTMGR_H */ | 29542 | #endif /* _CRYPTO_TESTMGR_H */ |
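Note on the new vectors above: both templates compress the 70-byte string "Join us now and share the software " (the sentence twice) into the same 45-byte LZ4 block. To sanity-check the hex bytes, the following standalone decoder for the raw LZ4 block format (a sketch written for this one vector; lz4_decompress_block is a made-up name, not the kernel's lz4 code) reproduces the plaintext:

    #include <stdio.h>
    #include <string.h>

    /* Minimal LZ4 block decoder: token = (literal_len << 4) | match_len,
     * lengths of 15 are extended by following bytes, matches use a 2-byte
     * little-endian offset and an implicit minimum match length of 4. */
    static int lz4_decompress_block(const unsigned char *in, int inlen,
                                    unsigned char *out, int outcap)
    {
            int ip = 0, op = 0;

            while (ip < inlen) {
                    unsigned int token = in[ip++];
                    int b, len = token >> 4;

                    if (len == 15) {                /* extended literal length */
                            do {
                                    b = in[ip++];
                                    len += b;
                            } while (b == 255);
                    }
                    if (ip + len > inlen || op + len > outcap)
                            return -1;
                    memcpy(out + op, in + ip, len); /* copy literals */
                    ip += len;
                    op += len;

                    if (ip >= inlen)                /* last sequence: literals only */
                            break;

                    int offset = in[ip] | (in[ip + 1] << 8);
                    ip += 2;
                    len = token & 15;
                    if (len == 15) {                /* extended match length */
                            do {
                                    b = in[ip++];
                                    len += b;
                            } while (b == 255);
                    }
                    len += 4;                       /* minimum match length */
                    if (offset == 0 || offset > op || op + len > outcap)
                            return -1;
                    while (len--) {                 /* byte-wise: overlap is legal */
                            out[op] = out[op - offset];
                            op++;
                    }
            }
            return op;
    }

    int main(void)
    {
            /* The 45-byte .input of lz4_decomp_tv_template[0]; sizeof - 1
             * drops the string literal's trailing NUL. */
            static const unsigned char comp[] =
                    "\xf0\x10\x4a\x6f\x69\x6e\x20\x75\x73\x20\x6e\x6f\x77\x20"
                    "\x61\x6e\x64\x20\x73\x68\x61\x72\x65\x20\x74\x68\x65\x20"
                    "\x73\x6f\x66\x74\x77\x0d\x00\x0f\x23\x00\x0b\x50\x77\x61"
                    "\x72\x65\x20";
            unsigned char out[128];
            int n = lz4_decompress_block(comp, sizeof(comp) - 1, out, sizeof(out));

            if (n < 0)
                    return 1;
            printf("%d bytes: %.*s\n", n, n, (const char *)out);
            return 0;
    }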