diff options
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig | 23 | ||||
-rw-r--r-- | crypto/Makefile | 5 | ||||
-rw-r--r-- | crypto/ahash.c | 24 | ||||
-rw-r--r-- | crypto/algapi.c | 15 | ||||
-rw-r--r-- | crypto/crc32_generic.c (renamed from crypto/crc32.c) | 3 | ||||
-rw-r--r-- | crypto/crypto_engine.c | 355 | ||||
-rw-r--r-- | crypto/drbg.c | 64 | ||||
-rw-r--r-- | crypto/internal.h | 3 | ||||
-rw-r--r-- | crypto/keywrap.c | 4 | ||||
-rw-r--r-- | crypto/mcryptd.c | 1 | ||||
-rw-r--r-- | crypto/pcompress.c | 115 | ||||
-rw-r--r-- | crypto/shash.c | 147 | ||||
-rw-r--r-- | crypto/skcipher.c | 4 | ||||
-rw-r--r-- | crypto/tcrypt.c | 239 | ||||
-rw-r--r-- | crypto/testmgr.c | 401 | ||||
-rw-r--r-- | crypto/testmgr.h | 144 | ||||
-rw-r--r-- | crypto/xts.c | 11 | ||||
-rw-r--r-- | crypto/zlib.c | 381 |
18 files changed, 585 insertions, 1354 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 3be07ad1d80d..93a1fdc1feee 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -84,15 +84,6 @@ config CRYPTO_RNG_DEFAULT | |||
84 | tristate | 84 | tristate |
85 | select CRYPTO_DRBG_MENU | 85 | select CRYPTO_DRBG_MENU |
86 | 86 | ||
87 | config CRYPTO_PCOMP | ||
88 | tristate | ||
89 | select CRYPTO_PCOMP2 | ||
90 | select CRYPTO_ALGAPI | ||
91 | |||
92 | config CRYPTO_PCOMP2 | ||
93 | tristate | ||
94 | select CRYPTO_ALGAPI2 | ||
95 | |||
96 | config CRYPTO_AKCIPHER2 | 87 | config CRYPTO_AKCIPHER2 |
97 | tristate | 88 | tristate |
98 | select CRYPTO_ALGAPI2 | 89 | select CRYPTO_ALGAPI2 |
@@ -122,7 +113,6 @@ config CRYPTO_MANAGER2 | |||
122 | select CRYPTO_AEAD2 | 113 | select CRYPTO_AEAD2 |
123 | select CRYPTO_HASH2 | 114 | select CRYPTO_HASH2 |
124 | select CRYPTO_BLKCIPHER2 | 115 | select CRYPTO_BLKCIPHER2 |
125 | select CRYPTO_PCOMP2 | ||
126 | select CRYPTO_AKCIPHER2 | 116 | select CRYPTO_AKCIPHER2 |
127 | 117 | ||
128 | config CRYPTO_USER | 118 | config CRYPTO_USER |
@@ -227,6 +217,9 @@ config CRYPTO_GLUE_HELPER_X86 | |||
227 | depends on X86 | 217 | depends on X86 |
228 | select CRYPTO_ALGAPI | 218 | select CRYPTO_ALGAPI |
229 | 219 | ||
220 | config CRYPTO_ENGINE | ||
221 | tristate | ||
222 | |||
230 | comment "Authenticated Encryption with Associated Data" | 223 | comment "Authenticated Encryption with Associated Data" |
231 | 224 | ||
232 | config CRYPTO_CCM | 225 | config CRYPTO_CCM |
@@ -1506,15 +1499,6 @@ config CRYPTO_DEFLATE | |||
1506 | 1499 | ||
1507 | You will most probably want this if using IPSec. | 1500 | You will most probably want this if using IPSec. |
1508 | 1501 | ||
1509 | config CRYPTO_ZLIB | ||
1510 | tristate "Zlib compression algorithm" | ||
1511 | select CRYPTO_PCOMP | ||
1512 | select ZLIB_INFLATE | ||
1513 | select ZLIB_DEFLATE | ||
1514 | select NLATTR | ||
1515 | help | ||
1516 | This is the zlib algorithm. | ||
1517 | |||
1518 | config CRYPTO_LZO | 1502 | config CRYPTO_LZO |
1519 | tristate "LZO compression algorithm" | 1503 | tristate "LZO compression algorithm" |
1520 | select CRYPTO_ALGAPI | 1504 | select CRYPTO_ALGAPI |
@@ -1595,6 +1579,7 @@ endif # if CRYPTO_DRBG_MENU | |||
1595 | 1579 | ||
1596 | config CRYPTO_JITTERENTROPY | 1580 | config CRYPTO_JITTERENTROPY |
1597 | tristate "Jitterentropy Non-Deterministic Random Number Generator" | 1581 | tristate "Jitterentropy Non-Deterministic Random Number Generator" |
1582 | select CRYPTO_RNG | ||
1598 | help | 1583 | help |
1599 | The Jitterentropy RNG is a noise that is intended | 1584 | The Jitterentropy RNG is a noise that is intended |
1600 | to provide seed to another RNG. The RNG does not | 1585 | to provide seed to another RNG. The RNG does not |
diff --git a/crypto/Makefile b/crypto/Makefile index 2acdbbd30475..4f4ef7eaae3f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -7,6 +7,7 @@ crypto-y := api.o cipher.o compress.o memneq.o | |||
7 | 7 | ||
8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o |
9 | 9 | ||
10 | obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o | ||
10 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o | 11 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o |
11 | 12 | ||
12 | crypto_algapi-$(CONFIG_PROC_FS) += proc.o | 13 | crypto_algapi-$(CONFIG_PROC_FS) += proc.o |
@@ -28,7 +29,6 @@ crypto_hash-y += ahash.o | |||
28 | crypto_hash-y += shash.o | 29 | crypto_hash-y += shash.o |
29 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 30 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
30 | 31 | ||
31 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o | ||
32 | obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o | 32 | obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o |
33 | 33 | ||
34 | $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h | 34 | $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h |
@@ -99,10 +99,9 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o | |||
99 | obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o | 99 | obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o |
100 | obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o | 100 | obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o |
101 | obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o | 101 | obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o |
102 | obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o | ||
103 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o | 102 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o |
104 | obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o | 103 | obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o |
105 | obj-$(CONFIG_CRYPTO_CRC32) += crc32.o | 104 | obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o |
106 | obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o | 105 | obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o |
107 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o | 106 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o |
108 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o | 107 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o |
diff --git a/crypto/ahash.c b/crypto/ahash.c index d19b52324cf5..5fc1f172963d 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -166,24 +166,6 @@ int crypto_ahash_walk_first(struct ahash_request *req, | |||
166 | } | 166 | } |
167 | EXPORT_SYMBOL_GPL(crypto_ahash_walk_first); | 167 | EXPORT_SYMBOL_GPL(crypto_ahash_walk_first); |
168 | 168 | ||
169 | int crypto_hash_walk_first_compat(struct hash_desc *hdesc, | ||
170 | struct crypto_hash_walk *walk, | ||
171 | struct scatterlist *sg, unsigned int len) | ||
172 | { | ||
173 | walk->total = len; | ||
174 | |||
175 | if (!walk->total) { | ||
176 | walk->entrylen = 0; | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | walk->alignmask = crypto_hash_alignmask(hdesc->tfm); | ||
181 | walk->sg = sg; | ||
182 | walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK; | ||
183 | |||
184 | return hash_walk_new_entry(walk); | ||
185 | } | ||
186 | |||
187 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, | 169 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, |
188 | unsigned int keylen) | 170 | unsigned int keylen) |
189 | { | 171 | { |
@@ -542,6 +524,12 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, | |||
542 | } | 524 | } |
543 | EXPORT_SYMBOL_GPL(crypto_alloc_ahash); | 525 | EXPORT_SYMBOL_GPL(crypto_alloc_ahash); |
544 | 526 | ||
527 | int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) | ||
528 | { | ||
529 | return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask); | ||
530 | } | ||
531 | EXPORT_SYMBOL_GPL(crypto_has_ahash); | ||
532 | |||
545 | static int ahash_prepare_alg(struct ahash_alg *alg) | 533 | static int ahash_prepare_alg(struct ahash_alg *alg) |
546 | { | 534 | { |
547 | struct crypto_alg *base = &alg->halg.base; | 535 | struct crypto_alg *base = &alg->halg.base; |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 7be76aa31579..731255a6104f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -987,6 +987,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg) | |||
987 | } | 987 | } |
988 | EXPORT_SYMBOL_GPL(crypto_alg_extsize); | 988 | EXPORT_SYMBOL_GPL(crypto_alg_extsize); |
989 | 989 | ||
990 | int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, | ||
991 | u32 type, u32 mask) | ||
992 | { | ||
993 | int ret = 0; | ||
994 | struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask); | ||
995 | |||
996 | if (!IS_ERR(alg)) { | ||
997 | crypto_mod_put(alg); | ||
998 | ret = 1; | ||
999 | } | ||
1000 | |||
1001 | return ret; | ||
1002 | } | ||
1003 | EXPORT_SYMBOL_GPL(crypto_type_has_alg); | ||
1004 | |||
990 | static int __init crypto_algapi_init(void) | 1005 | static int __init crypto_algapi_init(void) |
991 | { | 1006 | { |
992 | crypto_init_proc(); | 1007 | crypto_init_proc(); |
diff --git a/crypto/crc32.c b/crypto/crc32_generic.c index 187ded28cb0b..aa2a25fc7482 100644 --- a/crypto/crc32.c +++ b/crypto/crc32_generic.c | |||
@@ -131,7 +131,7 @@ static struct shash_alg alg = { | |||
131 | .digestsize = CHKSUM_DIGEST_SIZE, | 131 | .digestsize = CHKSUM_DIGEST_SIZE, |
132 | .base = { | 132 | .base = { |
133 | .cra_name = "crc32", | 133 | .cra_name = "crc32", |
134 | .cra_driver_name = "crc32-table", | 134 | .cra_driver_name = "crc32-generic", |
135 | .cra_priority = 100, | 135 | .cra_priority = 100, |
136 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 136 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
137 | .cra_ctxsize = sizeof(u32), | 137 | .cra_ctxsize = sizeof(u32), |
@@ -157,3 +157,4 @@ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); | |||
157 | MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); | 157 | MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); |
158 | MODULE_LICENSE("GPL"); | 158 | MODULE_LICENSE("GPL"); |
159 | MODULE_ALIAS_CRYPTO("crc32"); | 159 | MODULE_ALIAS_CRYPTO("crc32"); |
160 | MODULE_ALIAS_CRYPTO("crc32-generic"); | ||
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c new file mode 100644 index 000000000000..a55c82dd48ef --- /dev/null +++ b/crypto/crypto_engine.c | |||
@@ -0,0 +1,355 @@ | |||
1 | /* | ||
2 | * Handle async block request by crypto hardware engine. | ||
3 | * | ||
4 | * Copyright (C) 2016 Linaro, Inc. | ||
5 | * | ||
6 | * Author: Baolin Wang <baolin.wang@linaro.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/err.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include "internal.h" | ||
18 | |||
19 | #define CRYPTO_ENGINE_MAX_QLEN 10 | ||
20 | |||
21 | void crypto_finalize_request(struct crypto_engine *engine, | ||
22 | struct ablkcipher_request *req, int err); | ||
23 | |||
24 | /** | ||
25 | * crypto_pump_requests - dequeue one request from engine queue to process | ||
26 | * @engine: the hardware engine | ||
27 | * @in_kthread: true if we are in the context of the request pump thread | ||
28 | * | ||
29 | * This function checks if there is any request in the engine queue that | ||
30 | * needs processing and if so call out to the driver to initialize hardware | ||
31 | * and handle each request. | ||
32 | */ | ||
33 | static void crypto_pump_requests(struct crypto_engine *engine, | ||
34 | bool in_kthread) | ||
35 | { | ||
36 | struct crypto_async_request *async_req, *backlog; | ||
37 | struct ablkcipher_request *req; | ||
38 | unsigned long flags; | ||
39 | bool was_busy = false; | ||
40 | int ret; | ||
41 | |||
42 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
43 | |||
44 | /* Make sure we are not already running a request */ | ||
45 | if (engine->cur_req) | ||
46 | goto out; | ||
47 | |||
48 | /* If another context is idling then defer */ | ||
49 | if (engine->idling) { | ||
50 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | ||
51 | goto out; | ||
52 | } | ||
53 | |||
54 | /* Check if the engine queue is idle */ | ||
55 | if (!crypto_queue_len(&engine->queue) || !engine->running) { | ||
56 | if (!engine->busy) | ||
57 | goto out; | ||
58 | |||
59 | /* Only do teardown in the thread */ | ||
60 | if (!in_kthread) { | ||
61 | queue_kthread_work(&engine->kworker, | ||
62 | &engine->pump_requests); | ||
63 | goto out; | ||
64 | } | ||
65 | |||
66 | engine->busy = false; | ||
67 | engine->idling = true; | ||
68 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
69 | |||
70 | if (engine->unprepare_crypt_hardware && | ||
71 | engine->unprepare_crypt_hardware(engine)) | ||
72 | pr_err("failed to unprepare crypt hardware\n"); | ||
73 | |||
74 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
75 | engine->idling = false; | ||
76 | goto out; | ||
77 | } | ||
78 | |||
79 | /* Get the fist request from the engine queue to handle */ | ||
80 | backlog = crypto_get_backlog(&engine->queue); | ||
81 | async_req = crypto_dequeue_request(&engine->queue); | ||
82 | if (!async_req) | ||
83 | goto out; | ||
84 | |||
85 | req = ablkcipher_request_cast(async_req); | ||
86 | |||
87 | engine->cur_req = req; | ||
88 | if (backlog) | ||
89 | backlog->complete(backlog, -EINPROGRESS); | ||
90 | |||
91 | if (engine->busy) | ||
92 | was_busy = true; | ||
93 | else | ||
94 | engine->busy = true; | ||
95 | |||
96 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
97 | |||
98 | /* Until here we get the request need to be encrypted successfully */ | ||
99 | if (!was_busy && engine->prepare_crypt_hardware) { | ||
100 | ret = engine->prepare_crypt_hardware(engine); | ||
101 | if (ret) { | ||
102 | pr_err("failed to prepare crypt hardware\n"); | ||
103 | goto req_err; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | if (engine->prepare_request) { | ||
108 | ret = engine->prepare_request(engine, engine->cur_req); | ||
109 | if (ret) { | ||
110 | pr_err("failed to prepare request: %d\n", ret); | ||
111 | goto req_err; | ||
112 | } | ||
113 | engine->cur_req_prepared = true; | ||
114 | } | ||
115 | |||
116 | ret = engine->crypt_one_request(engine, engine->cur_req); | ||
117 | if (ret) { | ||
118 | pr_err("failed to crypt one request from queue\n"); | ||
119 | goto req_err; | ||
120 | } | ||
121 | return; | ||
122 | |||
123 | req_err: | ||
124 | crypto_finalize_request(engine, engine->cur_req, ret); | ||
125 | return; | ||
126 | |||
127 | out: | ||
128 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
129 | } | ||
130 | |||
131 | static void crypto_pump_work(struct kthread_work *work) | ||
132 | { | ||
133 | struct crypto_engine *engine = | ||
134 | container_of(work, struct crypto_engine, pump_requests); | ||
135 | |||
136 | crypto_pump_requests(engine, true); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * crypto_transfer_request - transfer the new request into the engine queue | ||
141 | * @engine: the hardware engine | ||
142 | * @req: the request need to be listed into the engine queue | ||
143 | */ | ||
144 | int crypto_transfer_request(struct crypto_engine *engine, | ||
145 | struct ablkcipher_request *req, bool need_pump) | ||
146 | { | ||
147 | unsigned long flags; | ||
148 | int ret; | ||
149 | |||
150 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
151 | |||
152 | if (!engine->running) { | ||
153 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
154 | return -ESHUTDOWN; | ||
155 | } | ||
156 | |||
157 | ret = ablkcipher_enqueue_request(&engine->queue, req); | ||
158 | |||
159 | if (!engine->busy && need_pump) | ||
160 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | ||
161 | |||
162 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
163 | return ret; | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(crypto_transfer_request); | ||
166 | |||
167 | /** | ||
168 | * crypto_transfer_request_to_engine - transfer one request to list into the | ||
169 | * engine queue | ||
170 | * @engine: the hardware engine | ||
171 | * @req: the request need to be listed into the engine queue | ||
172 | */ | ||
173 | int crypto_transfer_request_to_engine(struct crypto_engine *engine, | ||
174 | struct ablkcipher_request *req) | ||
175 | { | ||
176 | return crypto_transfer_request(engine, req, true); | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine); | ||
179 | |||
180 | /** | ||
181 | * crypto_finalize_request - finalize one request if the request is done | ||
182 | * @engine: the hardware engine | ||
183 | * @req: the request need to be finalized | ||
184 | * @err: error number | ||
185 | */ | ||
186 | void crypto_finalize_request(struct crypto_engine *engine, | ||
187 | struct ablkcipher_request *req, int err) | ||
188 | { | ||
189 | unsigned long flags; | ||
190 | bool finalize_cur_req = false; | ||
191 | int ret; | ||
192 | |||
193 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
194 | if (engine->cur_req == req) | ||
195 | finalize_cur_req = true; | ||
196 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
197 | |||
198 | if (finalize_cur_req) { | ||
199 | if (engine->cur_req_prepared && engine->unprepare_request) { | ||
200 | ret = engine->unprepare_request(engine, req); | ||
201 | if (ret) | ||
202 | pr_err("failed to unprepare request\n"); | ||
203 | } | ||
204 | |||
205 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
206 | engine->cur_req = NULL; | ||
207 | engine->cur_req_prepared = false; | ||
208 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
209 | } | ||
210 | |||
211 | req->base.complete(&req->base, err); | ||
212 | |||
213 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(crypto_finalize_request); | ||
216 | |||
217 | /** | ||
218 | * crypto_engine_start - start the hardware engine | ||
219 | * @engine: the hardware engine need to be started | ||
220 | * | ||
221 | * Return 0 on success, else on fail. | ||
222 | */ | ||
223 | int crypto_engine_start(struct crypto_engine *engine) | ||
224 | { | ||
225 | unsigned long flags; | ||
226 | |||
227 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
228 | |||
229 | if (engine->running || engine->busy) { | ||
230 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
231 | return -EBUSY; | ||
232 | } | ||
233 | |||
234 | engine->running = true; | ||
235 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
236 | |||
237 | queue_kthread_work(&engine->kworker, &engine->pump_requests); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(crypto_engine_start); | ||
242 | |||
243 | /** | ||
244 | * crypto_engine_stop - stop the hardware engine | ||
245 | * @engine: the hardware engine need to be stopped | ||
246 | * | ||
247 | * Return 0 on success, else on fail. | ||
248 | */ | ||
249 | int crypto_engine_stop(struct crypto_engine *engine) | ||
250 | { | ||
251 | unsigned long flags; | ||
252 | unsigned limit = 500; | ||
253 | int ret = 0; | ||
254 | |||
255 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
256 | |||
257 | /* | ||
258 | * If the engine queue is not empty or the engine is on busy state, | ||
259 | * we need to wait for a while to pump the requests of engine queue. | ||
260 | */ | ||
261 | while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) { | ||
262 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
263 | msleep(20); | ||
264 | spin_lock_irqsave(&engine->queue_lock, flags); | ||
265 | } | ||
266 | |||
267 | if (crypto_queue_len(&engine->queue) || engine->busy) | ||
268 | ret = -EBUSY; | ||
269 | else | ||
270 | engine->running = false; | ||
271 | |||
272 | spin_unlock_irqrestore(&engine->queue_lock, flags); | ||
273 | |||
274 | if (ret) | ||
275 | pr_warn("could not stop engine\n"); | ||
276 | |||
277 | return ret; | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(crypto_engine_stop); | ||
280 | |||
281 | /** | ||
282 | * crypto_engine_alloc_init - allocate crypto hardware engine structure and | ||
283 | * initialize it. | ||
284 | * @dev: the device attached with one hardware engine | ||
285 | * @rt: whether this queue is set to run as a realtime task | ||
286 | * | ||
287 | * This must be called from context that can sleep. | ||
288 | * Return: the crypto engine structure on success, else NULL. | ||
289 | */ | ||
290 | struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | ||
291 | { | ||
292 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | ||
293 | struct crypto_engine *engine; | ||
294 | |||
295 | if (!dev) | ||
296 | return NULL; | ||
297 | |||
298 | engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL); | ||
299 | if (!engine) | ||
300 | return NULL; | ||
301 | |||
302 | engine->rt = rt; | ||
303 | engine->running = false; | ||
304 | engine->busy = false; | ||
305 | engine->idling = false; | ||
306 | engine->cur_req_prepared = false; | ||
307 | engine->priv_data = dev; | ||
308 | snprintf(engine->name, sizeof(engine->name), | ||
309 | "%s-engine", dev_name(dev)); | ||
310 | |||
311 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); | ||
312 | spin_lock_init(&engine->queue_lock); | ||
313 | |||
314 | init_kthread_worker(&engine->kworker); | ||
315 | engine->kworker_task = kthread_run(kthread_worker_fn, | ||
316 | &engine->kworker, "%s", | ||
317 | engine->name); | ||
318 | if (IS_ERR(engine->kworker_task)) { | ||
319 | dev_err(dev, "failed to create crypto request pump task\n"); | ||
320 | return NULL; | ||
321 | } | ||
322 | init_kthread_work(&engine->pump_requests, crypto_pump_work); | ||
323 | |||
324 | if (engine->rt) { | ||
325 | dev_info(dev, "will run requests pump with realtime priority\n"); | ||
326 | sched_setscheduler(engine->kworker_task, SCHED_FIFO, ¶m); | ||
327 | } | ||
328 | |||
329 | return engine; | ||
330 | } | ||
331 | EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); | ||
332 | |||
333 | /** | ||
334 | * crypto_engine_exit - free the resources of hardware engine when exit | ||
335 | * @engine: the hardware engine need to be freed | ||
336 | * | ||
337 | * Return 0 for success. | ||
338 | */ | ||
339 | int crypto_engine_exit(struct crypto_engine *engine) | ||
340 | { | ||
341 | int ret; | ||
342 | |||
343 | ret = crypto_engine_stop(engine); | ||
344 | if (ret) | ||
345 | return ret; | ||
346 | |||
347 | flush_kthread_worker(&engine->kworker); | ||
348 | kthread_stop(engine->kworker_task); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | EXPORT_SYMBOL_GPL(crypto_engine_exit); | ||
353 | |||
354 | MODULE_LICENSE("GPL"); | ||
355 | MODULE_DESCRIPTION("Crypto hardware engine framework"); | ||
diff --git a/crypto/drbg.c b/crypto/drbg.c index ab6ef1d08568..1b86310db7b1 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -220,48 +220,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags) | |||
220 | } | 220 | } |
221 | 221 | ||
222 | /* | 222 | /* |
223 | * FIPS 140-2 continuous self test | ||
224 | * The test is performed on the result of one round of the output | ||
225 | * function. Thus, the function implicitly knows the size of the | ||
226 | * buffer. | ||
227 | * | ||
228 | * @drbg DRBG handle | ||
229 | * @buf output buffer of random data to be checked | ||
230 | * | ||
231 | * return: | ||
232 | * true on success | ||
233 | * false on error | ||
234 | */ | ||
235 | static bool drbg_fips_continuous_test(struct drbg_state *drbg, | ||
236 | const unsigned char *buf) | ||
237 | { | ||
238 | #ifdef CONFIG_CRYPTO_FIPS | ||
239 | int ret = 0; | ||
240 | /* skip test if we test the overall system */ | ||
241 | if (list_empty(&drbg->test_data.list)) | ||
242 | return true; | ||
243 | /* only perform test in FIPS mode */ | ||
244 | if (0 == fips_enabled) | ||
245 | return true; | ||
246 | if (!drbg->fips_primed) { | ||
247 | /* Priming of FIPS test */ | ||
248 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); | ||
249 | drbg->fips_primed = true; | ||
250 | /* return false due to priming, i.e. another round is needed */ | ||
251 | return false; | ||
252 | } | ||
253 | ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg)); | ||
254 | if (!ret) | ||
255 | panic("DRBG continuous self test failed\n"); | ||
256 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); | ||
257 | /* the test shall pass when the two compared values are not equal */ | ||
258 | return ret != 0; | ||
259 | #else | ||
260 | return true; | ||
261 | #endif /* CONFIG_CRYPTO_FIPS */ | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * Convert an integer into a byte representation of this integer. | 223 | * Convert an integer into a byte representation of this integer. |
266 | * The byte representation is big-endian | 224 | * The byte representation is big-endian |
267 | * | 225 | * |
@@ -603,11 +561,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg, | |||
603 | } | 561 | } |
604 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | 562 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? |
605 | drbg_blocklen(drbg) : (buflen - len); | 563 | drbg_blocklen(drbg) : (buflen - len); |
606 | if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) { | ||
607 | /* 10.2.1.5.2 step 6 */ | ||
608 | crypto_inc(drbg->V, drbg_blocklen(drbg)); | ||
609 | continue; | ||
610 | } | ||
611 | /* 10.2.1.5.2 step 4.3 */ | 564 | /* 10.2.1.5.2 step 4.3 */ |
612 | memcpy(buf + len, drbg->scratchpad, outlen); | 565 | memcpy(buf + len, drbg->scratchpad, outlen); |
613 | len += outlen; | 566 | len += outlen; |
@@ -733,8 +686,6 @@ static int drbg_hmac_generate(struct drbg_state *drbg, | |||
733 | return ret; | 686 | return ret; |
734 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | 687 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? |
735 | drbg_blocklen(drbg) : (buflen - len); | 688 | drbg_blocklen(drbg) : (buflen - len); |
736 | if (!drbg_fips_continuous_test(drbg, drbg->V)) | ||
737 | continue; | ||
738 | 689 | ||
739 | /* 10.1.2.5 step 4.2 */ | 690 | /* 10.1.2.5 step 4.2 */ |
740 | memcpy(buf + len, drbg->V, outlen); | 691 | memcpy(buf + len, drbg->V, outlen); |
@@ -963,10 +914,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg, | |||
963 | } | 914 | } |
964 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | 915 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? |
965 | drbg_blocklen(drbg) : (buflen - len); | 916 | drbg_blocklen(drbg) : (buflen - len); |
966 | if (!drbg_fips_continuous_test(drbg, dst)) { | ||
967 | crypto_inc(src, drbg_statelen(drbg)); | ||
968 | continue; | ||
969 | } | ||
970 | /* 10.1.1.4 step hashgen 4.2 */ | 917 | /* 10.1.1.4 step hashgen 4.2 */ |
971 | memcpy(buf + len, dst, outlen); | 918 | memcpy(buf + len, dst, outlen); |
972 | len += outlen; | 919 | len += outlen; |
@@ -1201,11 +1148,6 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) | |||
1201 | drbg->reseed_ctr = 0; | 1148 | drbg->reseed_ctr = 0; |
1202 | drbg->d_ops = NULL; | 1149 | drbg->d_ops = NULL; |
1203 | drbg->core = NULL; | 1150 | drbg->core = NULL; |
1204 | #ifdef CONFIG_CRYPTO_FIPS | ||
1205 | kzfree(drbg->prev); | ||
1206 | drbg->prev = NULL; | ||
1207 | drbg->fips_primed = false; | ||
1208 | #endif | ||
1209 | } | 1151 | } |
1210 | 1152 | ||
1211 | /* | 1153 | /* |
@@ -1244,12 +1186,6 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) | |||
1244 | drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL); | 1186 | drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL); |
1245 | if (!drbg->C) | 1187 | if (!drbg->C) |
1246 | goto err; | 1188 | goto err; |
1247 | #ifdef CONFIG_CRYPTO_FIPS | ||
1248 | drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL); | ||
1249 | if (!drbg->prev) | ||
1250 | goto err; | ||
1251 | drbg->fips_primed = false; | ||
1252 | #endif | ||
1253 | /* scratchpad is only generated for CTR and Hash */ | 1189 | /* scratchpad is only generated for CTR and Hash */ |
1254 | if (drbg->core->flags & DRBG_HMAC) | 1190 | if (drbg->core->flags & DRBG_HMAC) |
1255 | sb_size = 0; | 1191 | sb_size = 0; |
diff --git a/crypto/internal.h b/crypto/internal.h index 00e42a3ed814..7eefcdb00227 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v); | |||
104 | 104 | ||
105 | unsigned int crypto_alg_extsize(struct crypto_alg *alg); | 105 | unsigned int crypto_alg_extsize(struct crypto_alg *alg); |
106 | 106 | ||
107 | int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, | ||
108 | u32 type, u32 mask); | ||
109 | |||
107 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) | 110 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) |
108 | { | 111 | { |
109 | atomic_inc(&alg->cra_refcnt); | 112 | atomic_inc(&alg->cra_refcnt); |
diff --git a/crypto/keywrap.c b/crypto/keywrap.c index b1d106ce55f3..72014f963ba7 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c | |||
@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
212 | SEMIBSIZE)) | 212 | SEMIBSIZE)) |
213 | ret = -EBADMSG; | 213 | ret = -EBADMSG; |
214 | 214 | ||
215 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); | 215 | memzero_explicit(block, sizeof(struct crypto_kw_block)); |
216 | 216 | ||
217 | return ret; | 217 | return ret; |
218 | } | 218 | } |
@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
297 | /* establish the IV for the caller to pick up */ | 297 | /* establish the IV for the caller to pick up */ |
298 | memcpy(desc->info, block->A, SEMIBSIZE); | 298 | memcpy(desc->info, block->A, SEMIBSIZE); |
299 | 299 | ||
300 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); | 300 | memzero_explicit(block, sizeof(struct crypto_kw_block)); |
301 | 301 | ||
302 | return 0; | 302 | return 0; |
303 | } | 303 | } |
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index f78d4fc4e38a..c4eb9da49d4f 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c | |||
@@ -522,6 +522,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
522 | inst->alg.halg.base.cra_flags = type; | 522 | inst->alg.halg.base.cra_flags = type; |
523 | 523 | ||
524 | inst->alg.halg.digestsize = salg->digestsize; | 524 | inst->alg.halg.digestsize = salg->digestsize; |
525 | inst->alg.halg.statesize = salg->statesize; | ||
525 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); | 526 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); |
526 | 527 | ||
527 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; | 528 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; |
diff --git a/crypto/pcompress.c b/crypto/pcompress.c deleted file mode 100644 index 7a13b4088857..000000000000 --- a/crypto/pcompress.c +++ /dev/null | |||
@@ -1,115 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Partial (de)compression operations. | ||
5 | * | ||
6 | * Copyright 2008 Sony Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. | ||
19 | * If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/crypto.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/cryptouser.h> | ||
28 | #include <net/netlink.h> | ||
29 | |||
30 | #include <crypto/compress.h> | ||
31 | #include <crypto/internal/compress.h> | ||
32 | |||
33 | #include "internal.h" | ||
34 | |||
35 | |||
36 | static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | #ifdef CONFIG_NET | ||
47 | static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
48 | { | ||
49 | struct crypto_report_comp rpcomp; | ||
50 | |||
51 | strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type)); | ||
52 | if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, | ||
53 | sizeof(struct crypto_report_comp), &rpcomp)) | ||
54 | goto nla_put_failure; | ||
55 | return 0; | ||
56 | |||
57 | nla_put_failure: | ||
58 | return -EMSGSIZE; | ||
59 | } | ||
60 | #else | ||
61 | static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
62 | { | ||
63 | return -ENOSYS; | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
68 | __attribute__ ((unused)); | ||
69 | static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
70 | { | ||
71 | seq_printf(m, "type : pcomp\n"); | ||
72 | } | ||
73 | |||
74 | static const struct crypto_type crypto_pcomp_type = { | ||
75 | .extsize = crypto_alg_extsize, | ||
76 | .init = crypto_pcomp_init, | ||
77 | .init_tfm = crypto_pcomp_init_tfm, | ||
78 | #ifdef CONFIG_PROC_FS | ||
79 | .show = crypto_pcomp_show, | ||
80 | #endif | ||
81 | .report = crypto_pcomp_report, | ||
82 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
83 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
84 | .type = CRYPTO_ALG_TYPE_PCOMPRESS, | ||
85 | .tfmsize = offsetof(struct crypto_pcomp, base), | ||
86 | }; | ||
87 | |||
88 | struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type, | ||
89 | u32 mask) | ||
90 | { | ||
91 | return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask); | ||
92 | } | ||
93 | EXPORT_SYMBOL_GPL(crypto_alloc_pcomp); | ||
94 | |||
95 | int crypto_register_pcomp(struct pcomp_alg *alg) | ||
96 | { | ||
97 | struct crypto_alg *base = &alg->base; | ||
98 | |||
99 | base->cra_type = &crypto_pcomp_type; | ||
100 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
101 | base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS; | ||
102 | |||
103 | return crypto_register_alg(base); | ||
104 | } | ||
105 | EXPORT_SYMBOL_GPL(crypto_register_pcomp); | ||
106 | |||
107 | int crypto_unregister_pcomp(struct pcomp_alg *alg) | ||
108 | { | ||
109 | return crypto_unregister_alg(&alg->base); | ||
110 | } | ||
111 | EXPORT_SYMBOL_GPL(crypto_unregister_pcomp); | ||
112 | |||
113 | MODULE_LICENSE("GPL"); | ||
114 | MODULE_DESCRIPTION("Partial (de)compression type"); | ||
115 | MODULE_AUTHOR("Sony Corporation"); | ||
diff --git a/crypto/shash.c b/crypto/shash.c index 359754591653..a051541a4a17 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -368,151 +368,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
371 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, | ||
372 | unsigned int keylen) | ||
373 | { | ||
374 | struct shash_desc **descp = crypto_hash_ctx(tfm); | ||
375 | struct shash_desc *desc = *descp; | ||
376 | |||
377 | return crypto_shash_setkey(desc->tfm, key, keylen); | ||
378 | } | ||
379 | |||
380 | static int shash_compat_init(struct hash_desc *hdesc) | ||
381 | { | ||
382 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); | ||
383 | struct shash_desc *desc = *descp; | ||
384 | |||
385 | desc->flags = hdesc->flags; | ||
386 | |||
387 | return crypto_shash_init(desc); | ||
388 | } | ||
389 | |||
390 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | ||
391 | unsigned int len) | ||
392 | { | ||
393 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); | ||
394 | struct shash_desc *desc = *descp; | ||
395 | struct crypto_hash_walk walk; | ||
396 | int nbytes; | ||
397 | |||
398 | for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len); | ||
399 | nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes)) | ||
400 | nbytes = crypto_shash_update(desc, walk.data, nbytes); | ||
401 | |||
402 | return nbytes; | ||
403 | } | ||
404 | |||
405 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) | ||
406 | { | ||
407 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); | ||
408 | |||
409 | return crypto_shash_final(*descp, out); | ||
410 | } | ||
411 | |||
412 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | ||
413 | unsigned int nbytes, u8 *out) | ||
414 | { | ||
415 | unsigned int offset = sg->offset; | ||
416 | int err; | ||
417 | |||
418 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | ||
419 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); | ||
420 | struct shash_desc *desc = *descp; | ||
421 | void *data; | ||
422 | |||
423 | desc->flags = hdesc->flags; | ||
424 | |||
425 | data = kmap_atomic(sg_page(sg)); | ||
426 | err = crypto_shash_digest(desc, data + offset, nbytes, out); | ||
427 | kunmap_atomic(data); | ||
428 | crypto_yield(desc->flags); | ||
429 | goto out; | ||
430 | } | ||
431 | |||
432 | err = shash_compat_init(hdesc); | ||
433 | if (err) | ||
434 | goto out; | ||
435 | |||
436 | err = shash_compat_update(hdesc, sg, nbytes); | ||
437 | if (err) | ||
438 | goto out; | ||
439 | |||
440 | err = shash_compat_final(hdesc, out); | ||
441 | |||
442 | out: | ||
443 | return err; | ||
444 | } | ||
445 | |||
446 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) | ||
447 | { | ||
448 | struct shash_desc **descp = crypto_tfm_ctx(tfm); | ||
449 | struct shash_desc *desc = *descp; | ||
450 | |||
451 | crypto_free_shash(desc->tfm); | ||
452 | kzfree(desc); | ||
453 | } | ||
454 | |||
455 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | ||
456 | { | ||
457 | struct hash_tfm *crt = &tfm->crt_hash; | ||
458 | struct crypto_alg *calg = tfm->__crt_alg; | ||
459 | struct shash_alg *alg = __crypto_shash_alg(calg); | ||
460 | struct shash_desc **descp = crypto_tfm_ctx(tfm); | ||
461 | struct crypto_shash *shash; | ||
462 | struct shash_desc *desc; | ||
463 | |||
464 | if (!crypto_mod_get(calg)) | ||
465 | return -EAGAIN; | ||
466 | |||
467 | shash = crypto_create_tfm(calg, &crypto_shash_type); | ||
468 | if (IS_ERR(shash)) { | ||
469 | crypto_mod_put(calg); | ||
470 | return PTR_ERR(shash); | ||
471 | } | ||
472 | |||
473 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), | ||
474 | GFP_KERNEL); | ||
475 | if (!desc) { | ||
476 | crypto_free_shash(shash); | ||
477 | return -ENOMEM; | ||
478 | } | ||
479 | |||
480 | *descp = desc; | ||
481 | desc->tfm = shash; | ||
482 | tfm->exit = crypto_exit_shash_ops_compat; | ||
483 | |||
484 | crt->init = shash_compat_init; | ||
485 | crt->update = shash_compat_update; | ||
486 | crt->final = shash_compat_final; | ||
487 | crt->digest = shash_compat_digest; | ||
488 | crt->setkey = shash_compat_setkey; | ||
489 | |||
490 | crt->digestsize = alg->digestsize; | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | ||
496 | { | ||
497 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | ||
498 | case CRYPTO_ALG_TYPE_HASH_MASK: | ||
499 | return crypto_init_shash_ops_compat(tfm); | ||
500 | } | ||
501 | |||
502 | return -EINVAL; | ||
503 | } | ||
504 | |||
505 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, | ||
506 | u32 mask) | ||
507 | { | ||
508 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | ||
509 | case CRYPTO_ALG_TYPE_HASH_MASK: | ||
510 | return sizeof(struct shash_desc *); | ||
511 | } | ||
512 | |||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) | 371 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
517 | { | 372 | { |
518 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | 373 | struct crypto_shash *hash = __crypto_shash_cast(tfm); |
@@ -559,9 +414,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) | |||
559 | } | 414 | } |
560 | 415 | ||
561 | static const struct crypto_type crypto_shash_type = { | 416 | static const struct crypto_type crypto_shash_type = { |
562 | .ctxsize = crypto_shash_ctxsize, | ||
563 | .extsize = crypto_alg_extsize, | 417 | .extsize = crypto_alg_extsize, |
564 | .init = crypto_init_shash_ops, | ||
565 | .init_tfm = crypto_shash_init_tfm, | 418 | .init_tfm = crypto_shash_init_tfm, |
566 | #ifdef CONFIG_PROC_FS | 419 | #ifdef CONFIG_PROC_FS |
567 | .show = crypto_shash_show, | 420 | .show = crypto_shash_show, |
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d199c0b1751c..69230e9d4ac9 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -118,7 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) | |||
118 | skcipher->decrypt = skcipher_decrypt_blkcipher; | 118 | skcipher->decrypt = skcipher_decrypt_blkcipher; |
119 | 119 | ||
120 | skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); | 120 | skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); |
121 | skcipher->has_setkey = calg->cra_blkcipher.max_keysize; | 121 | skcipher->keysize = calg->cra_blkcipher.max_keysize; |
122 | 122 | ||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
@@ -211,7 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) | |||
211 | skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); | 211 | skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
212 | skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + | 212 | skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + |
213 | sizeof(struct ablkcipher_request); | 213 | sizeof(struct ablkcipher_request); |
214 | skcipher->has_setkey = calg->cra_ablkcipher.max_keysize; | 214 | skcipher->keysize = calg->cra_ablkcipher.max_keysize; |
215 | 215 | ||
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 270bc4b82bd9..579dce071463 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -554,164 +554,6 @@ out: | |||
554 | crypto_free_blkcipher(tfm); | 554 | crypto_free_blkcipher(tfm); |
555 | } | 555 | } |
556 | 556 | ||
557 | static int test_hash_jiffies_digest(struct hash_desc *desc, | ||
558 | struct scatterlist *sg, int blen, | ||
559 | char *out, int secs) | ||
560 | { | ||
561 | unsigned long start, end; | ||
562 | int bcount; | ||
563 | int ret; | ||
564 | |||
565 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | ||
566 | time_before(jiffies, end); bcount++) { | ||
567 | ret = crypto_hash_digest(desc, sg, blen, out); | ||
568 | if (ret) | ||
569 | return ret; | ||
570 | } | ||
571 | |||
572 | printk("%6u opers/sec, %9lu bytes/sec\n", | ||
573 | bcount / secs, ((long)bcount * blen) / secs); | ||
574 | |||
575 | return 0; | ||
576 | } | ||
577 | |||
578 | static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg, | ||
579 | int blen, int plen, char *out, int secs) | ||
580 | { | ||
581 | unsigned long start, end; | ||
582 | int bcount, pcount; | ||
583 | int ret; | ||
584 | |||
585 | if (plen == blen) | ||
586 | return test_hash_jiffies_digest(desc, sg, blen, out, secs); | ||
587 | |||
588 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | ||
589 | time_before(jiffies, end); bcount++) { | ||
590 | ret = crypto_hash_init(desc); | ||
591 | if (ret) | ||
592 | return ret; | ||
593 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
594 | ret = crypto_hash_update(desc, sg, plen); | ||
595 | if (ret) | ||
596 | return ret; | ||
597 | } | ||
598 | /* we assume there is enough space in 'out' for the result */ | ||
599 | ret = crypto_hash_final(desc, out); | ||
600 | if (ret) | ||
601 | return ret; | ||
602 | } | ||
603 | |||
604 | printk("%6u opers/sec, %9lu bytes/sec\n", | ||
605 | bcount / secs, ((long)bcount * blen) / secs); | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static int test_hash_cycles_digest(struct hash_desc *desc, | ||
611 | struct scatterlist *sg, int blen, char *out) | ||
612 | { | ||
613 | unsigned long cycles = 0; | ||
614 | int i; | ||
615 | int ret; | ||
616 | |||
617 | local_irq_disable(); | ||
618 | |||
619 | /* Warm-up run. */ | ||
620 | for (i = 0; i < 4; i++) { | ||
621 | ret = crypto_hash_digest(desc, sg, blen, out); | ||
622 | if (ret) | ||
623 | goto out; | ||
624 | } | ||
625 | |||
626 | /* The real thing. */ | ||
627 | for (i = 0; i < 8; i++) { | ||
628 | cycles_t start, end; | ||
629 | |||
630 | start = get_cycles(); | ||
631 | |||
632 | ret = crypto_hash_digest(desc, sg, blen, out); | ||
633 | if (ret) | ||
634 | goto out; | ||
635 | |||
636 | end = get_cycles(); | ||
637 | |||
638 | cycles += end - start; | ||
639 | } | ||
640 | |||
641 | out: | ||
642 | local_irq_enable(); | ||
643 | |||
644 | if (ret) | ||
645 | return ret; | ||
646 | |||
647 | printk("%6lu cycles/operation, %4lu cycles/byte\n", | ||
648 | cycles / 8, cycles / (8 * blen)); | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg, | ||
654 | int blen, int plen, char *out) | ||
655 | { | ||
656 | unsigned long cycles = 0; | ||
657 | int i, pcount; | ||
658 | int ret; | ||
659 | |||
660 | if (plen == blen) | ||
661 | return test_hash_cycles_digest(desc, sg, blen, out); | ||
662 | |||
663 | local_irq_disable(); | ||
664 | |||
665 | /* Warm-up run. */ | ||
666 | for (i = 0; i < 4; i++) { | ||
667 | ret = crypto_hash_init(desc); | ||
668 | if (ret) | ||
669 | goto out; | ||
670 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
671 | ret = crypto_hash_update(desc, sg, plen); | ||
672 | if (ret) | ||
673 | goto out; | ||
674 | } | ||
675 | ret = crypto_hash_final(desc, out); | ||
676 | if (ret) | ||
677 | goto out; | ||
678 | } | ||
679 | |||
680 | /* The real thing. */ | ||
681 | for (i = 0; i < 8; i++) { | ||
682 | cycles_t start, end; | ||
683 | |||
684 | start = get_cycles(); | ||
685 | |||
686 | ret = crypto_hash_init(desc); | ||
687 | if (ret) | ||
688 | goto out; | ||
689 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
690 | ret = crypto_hash_update(desc, sg, plen); | ||
691 | if (ret) | ||
692 | goto out; | ||
693 | } | ||
694 | ret = crypto_hash_final(desc, out); | ||
695 | if (ret) | ||
696 | goto out; | ||
697 | |||
698 | end = get_cycles(); | ||
699 | |||
700 | cycles += end - start; | ||
701 | } | ||
702 | |||
703 | out: | ||
704 | local_irq_enable(); | ||
705 | |||
706 | if (ret) | ||
707 | return ret; | ||
708 | |||
709 | printk("%6lu cycles/operation, %4lu cycles/byte\n", | ||
710 | cycles / 8, cycles / (8 * blen)); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | static void test_hash_sg_init(struct scatterlist *sg) | 557 | static void test_hash_sg_init(struct scatterlist *sg) |
716 | { | 558 | { |
717 | int i; | 559 | int i; |
@@ -723,69 +565,6 @@ static void test_hash_sg_init(struct scatterlist *sg) | |||
723 | } | 565 | } |
724 | } | 566 | } |
725 | 567 | ||
726 | static void test_hash_speed(const char *algo, unsigned int secs, | ||
727 | struct hash_speed *speed) | ||
728 | { | ||
729 | struct scatterlist sg[TVMEMSIZE]; | ||
730 | struct crypto_hash *tfm; | ||
731 | struct hash_desc desc; | ||
732 | static char output[1024]; | ||
733 | int i; | ||
734 | int ret; | ||
735 | |||
736 | tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); | ||
737 | |||
738 | if (IS_ERR(tfm)) { | ||
739 | printk(KERN_ERR "failed to load transform for %s: %ld\n", algo, | ||
740 | PTR_ERR(tfm)); | ||
741 | return; | ||
742 | } | ||
743 | |||
744 | printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo, | ||
745 | get_driver_name(crypto_hash, tfm)); | ||
746 | |||
747 | desc.tfm = tfm; | ||
748 | desc.flags = 0; | ||
749 | |||
750 | if (crypto_hash_digestsize(tfm) > sizeof(output)) { | ||
751 | printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n", | ||
752 | crypto_hash_digestsize(tfm), sizeof(output)); | ||
753 | goto out; | ||
754 | } | ||
755 | |||
756 | test_hash_sg_init(sg); | ||
757 | for (i = 0; speed[i].blen != 0; i++) { | ||
758 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | ||
759 | printk(KERN_ERR | ||
760 | "template (%u) too big for tvmem (%lu)\n", | ||
761 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); | ||
762 | goto out; | ||
763 | } | ||
764 | |||
765 | if (speed[i].klen) | ||
766 | crypto_hash_setkey(tfm, tvmem[0], speed[i].klen); | ||
767 | |||
768 | printk(KERN_INFO "test%3u " | ||
769 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | ||
770 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | ||
771 | |||
772 | if (secs) | ||
773 | ret = test_hash_jiffies(&desc, sg, speed[i].blen, | ||
774 | speed[i].plen, output, secs); | ||
775 | else | ||
776 | ret = test_hash_cycles(&desc, sg, speed[i].blen, | ||
777 | speed[i].plen, output); | ||
778 | |||
779 | if (ret) { | ||
780 | printk(KERN_ERR "hashing failed ret=%d\n", ret); | ||
781 | break; | ||
782 | } | ||
783 | } | ||
784 | |||
785 | out: | ||
786 | crypto_free_hash(tfm); | ||
787 | } | ||
788 | |||
789 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) | 568 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) |
790 | { | 569 | { |
791 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 570 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
@@ -945,8 +724,8 @@ out: | |||
945 | return 0; | 724 | return 0; |
946 | } | 725 | } |
947 | 726 | ||
948 | static void test_ahash_speed(const char *algo, unsigned int secs, | 727 | static void test_ahash_speed_common(const char *algo, unsigned int secs, |
949 | struct hash_speed *speed) | 728 | struct hash_speed *speed, unsigned mask) |
950 | { | 729 | { |
951 | struct scatterlist sg[TVMEMSIZE]; | 730 | struct scatterlist sg[TVMEMSIZE]; |
952 | struct tcrypt_result tresult; | 731 | struct tcrypt_result tresult; |
@@ -955,7 +734,7 @@ static void test_ahash_speed(const char *algo, unsigned int secs, | |||
955 | char *output; | 734 | char *output; |
956 | int i, ret; | 735 | int i, ret; |
957 | 736 | ||
958 | tfm = crypto_alloc_ahash(algo, 0, 0); | 737 | tfm = crypto_alloc_ahash(algo, 0, mask); |
959 | if (IS_ERR(tfm)) { | 738 | if (IS_ERR(tfm)) { |
960 | pr_err("failed to load transform for %s: %ld\n", | 739 | pr_err("failed to load transform for %s: %ld\n", |
961 | algo, PTR_ERR(tfm)); | 740 | algo, PTR_ERR(tfm)); |
@@ -1021,6 +800,18 @@ out: | |||
1021 | crypto_free_ahash(tfm); | 800 | crypto_free_ahash(tfm); |
1022 | } | 801 | } |
1023 | 802 | ||
803 | static void test_ahash_speed(const char *algo, unsigned int secs, | ||
804 | struct hash_speed *speed) | ||
805 | { | ||
806 | return test_ahash_speed_common(algo, secs, speed, 0); | ||
807 | } | ||
808 | |||
809 | static void test_hash_speed(const char *algo, unsigned int secs, | ||
810 | struct hash_speed *speed) | ||
811 | { | ||
812 | return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); | ||
813 | } | ||
814 | |||
1024 | static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) | 815 | static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) |
1025 | { | 816 | { |
1026 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 817 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ae8c57fd8bc7..b86883aedca1 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -96,13 +96,6 @@ struct comp_test_suite { | |||
96 | } comp, decomp; | 96 | } comp, decomp; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | struct pcomp_test_suite { | ||
100 | struct { | ||
101 | struct pcomp_testvec *vecs; | ||
102 | unsigned int count; | ||
103 | } comp, decomp; | ||
104 | }; | ||
105 | |||
106 | struct hash_test_suite { | 99 | struct hash_test_suite { |
107 | struct hash_testvec *vecs; | 100 | struct hash_testvec *vecs; |
108 | unsigned int count; | 101 | unsigned int count; |
@@ -133,7 +126,6 @@ struct alg_test_desc { | |||
133 | struct aead_test_suite aead; | 126 | struct aead_test_suite aead; |
134 | struct cipher_test_suite cipher; | 127 | struct cipher_test_suite cipher; |
135 | struct comp_test_suite comp; | 128 | struct comp_test_suite comp; |
136 | struct pcomp_test_suite pcomp; | ||
137 | struct hash_test_suite hash; | 129 | struct hash_test_suite hash; |
138 | struct cprng_test_suite cprng; | 130 | struct cprng_test_suite cprng; |
139 | struct drbg_test_suite drbg; | 131 | struct drbg_test_suite drbg; |
@@ -198,6 +190,61 @@ static int wait_async_op(struct tcrypt_result *tr, int ret) | |||
198 | return ret; | 190 | return ret; |
199 | } | 191 | } |
200 | 192 | ||
193 | static int ahash_partial_update(struct ahash_request **preq, | ||
194 | struct crypto_ahash *tfm, struct hash_testvec *template, | ||
195 | void *hash_buff, int k, int temp, struct scatterlist *sg, | ||
196 | const char *algo, char *result, struct tcrypt_result *tresult) | ||
197 | { | ||
198 | char *state; | ||
199 | struct ahash_request *req; | ||
200 | int statesize, ret = -EINVAL; | ||
201 | |||
202 | req = *preq; | ||
203 | statesize = crypto_ahash_statesize( | ||
204 | crypto_ahash_reqtfm(req)); | ||
205 | state = kmalloc(statesize, GFP_KERNEL); | ||
206 | if (!state) { | ||
207 | pr_err("alt: hash: Failed to alloc state for %s\n", algo); | ||
208 | goto out_nostate; | ||
209 | } | ||
210 | ret = crypto_ahash_export(req, state); | ||
211 | if (ret) { | ||
212 | pr_err("alt: hash: Failed to export() for %s\n", algo); | ||
213 | goto out; | ||
214 | } | ||
215 | ahash_request_free(req); | ||
216 | req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
217 | if (!req) { | ||
218 | pr_err("alg: hash: Failed to alloc request for %s\n", algo); | ||
219 | goto out_noreq; | ||
220 | } | ||
221 | ahash_request_set_callback(req, | ||
222 | CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
223 | tcrypt_complete, tresult); | ||
224 | |||
225 | memcpy(hash_buff, template->plaintext + temp, | ||
226 | template->tap[k]); | ||
227 | sg_init_one(&sg[0], hash_buff, template->tap[k]); | ||
228 | ahash_request_set_crypt(req, sg, result, template->tap[k]); | ||
229 | ret = crypto_ahash_import(req, state); | ||
230 | if (ret) { | ||
231 | pr_err("alg: hash: Failed to import() for %s\n", algo); | ||
232 | goto out; | ||
233 | } | ||
234 | ret = wait_async_op(tresult, crypto_ahash_update(req)); | ||
235 | if (ret) | ||
236 | goto out; | ||
237 | *preq = req; | ||
238 | ret = 0; | ||
239 | goto out_noreq; | ||
240 | out: | ||
241 | ahash_request_free(req); | ||
242 | out_noreq: | ||
243 | kfree(state); | ||
244 | out_nostate: | ||
245 | return ret; | ||
246 | } | ||
247 | |||
201 | static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | 248 | static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, |
202 | unsigned int tcount, bool use_digest, | 249 | unsigned int tcount, bool use_digest, |
203 | const int align_offset) | 250 | const int align_offset) |
@@ -385,6 +432,84 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
385 | } | 432 | } |
386 | } | 433 | } |
387 | 434 | ||
435 | /* partial update exercise */ | ||
436 | j = 0; | ||
437 | for (i = 0; i < tcount; i++) { | ||
438 | /* alignment tests are only done with continuous buffers */ | ||
439 | if (align_offset != 0) | ||
440 | break; | ||
441 | |||
442 | if (template[i].np < 2) | ||
443 | continue; | ||
444 | |||
445 | j++; | ||
446 | memset(result, 0, MAX_DIGEST_SIZE); | ||
447 | |||
448 | ret = -EINVAL; | ||
449 | hash_buff = xbuf[0]; | ||
450 | memcpy(hash_buff, template[i].plaintext, | ||
451 | template[i].tap[0]); | ||
452 | sg_init_one(&sg[0], hash_buff, template[i].tap[0]); | ||
453 | |||
454 | if (template[i].ksize) { | ||
455 | crypto_ahash_clear_flags(tfm, ~0); | ||
456 | if (template[i].ksize > MAX_KEYLEN) { | ||
457 | pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", | ||
458 | j, algo, template[i].ksize, MAX_KEYLEN); | ||
459 | ret = -EINVAL; | ||
460 | goto out; | ||
461 | } | ||
462 | memcpy(key, template[i].key, template[i].ksize); | ||
463 | ret = crypto_ahash_setkey(tfm, key, template[i].ksize); | ||
464 | if (ret) { | ||
465 | pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n", | ||
466 | j, algo, -ret); | ||
467 | goto out; | ||
468 | } | ||
469 | } | ||
470 | |||
471 | ahash_request_set_crypt(req, sg, result, template[i].tap[0]); | ||
472 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); | ||
473 | if (ret) { | ||
474 | pr_err("alt: hash: init failed on test %d for %s: ret=%d\n", | ||
475 | j, algo, -ret); | ||
476 | goto out; | ||
477 | } | ||
478 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); | ||
479 | if (ret) { | ||
480 | pr_err("alt: hash: update failed on test %d for %s: ret=%d\n", | ||
481 | j, algo, -ret); | ||
482 | goto out; | ||
483 | } | ||
484 | |||
485 | temp = template[i].tap[0]; | ||
486 | for (k = 1; k < template[i].np; k++) { | ||
487 | ret = ahash_partial_update(&req, tfm, &template[i], | ||
488 | hash_buff, k, temp, &sg[0], algo, result, | ||
489 | &tresult); | ||
490 | if (ret) { | ||
491 | pr_err("hash: partial update failed on test %d for %s: ret=%d\n", | ||
492 | j, algo, -ret); | ||
493 | goto out_noreq; | ||
494 | } | ||
495 | temp += template[i].tap[k]; | ||
496 | } | ||
497 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); | ||
498 | if (ret) { | ||
499 | pr_err("alt: hash: final failed on test %d for %s: ret=%d\n", | ||
500 | j, algo, -ret); | ||
501 | goto out; | ||
502 | } | ||
503 | if (memcmp(result, template[i].digest, | ||
504 | crypto_ahash_digestsize(tfm))) { | ||
505 | pr_err("alg: hash: Partial Test %d failed for %s\n", | ||
506 | j, algo); | ||
507 | hexdump(result, crypto_ahash_digestsize(tfm)); | ||
508 | ret = -EINVAL; | ||
509 | goto out; | ||
510 | } | ||
511 | } | ||
512 | |||
388 | ret = 0; | 513 | ret = 0; |
389 | 514 | ||
390 | out: | 515 | out: |
@@ -488,6 +613,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
488 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 613 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
489 | tcrypt_complete, &result); | 614 | tcrypt_complete, &result); |
490 | 615 | ||
616 | iv_len = crypto_aead_ivsize(tfm); | ||
617 | |||
491 | for (i = 0, j = 0; i < tcount; i++) { | 618 | for (i = 0, j = 0; i < tcount; i++) { |
492 | if (template[i].np) | 619 | if (template[i].np) |
493 | continue; | 620 | continue; |
@@ -508,7 +635,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
508 | 635 | ||
509 | memcpy(input, template[i].input, template[i].ilen); | 636 | memcpy(input, template[i].input, template[i].ilen); |
510 | memcpy(assoc, template[i].assoc, template[i].alen); | 637 | memcpy(assoc, template[i].assoc, template[i].alen); |
511 | iv_len = crypto_aead_ivsize(tfm); | ||
512 | if (template[i].iv) | 638 | if (template[i].iv) |
513 | memcpy(iv, template[i].iv, iv_len); | 639 | memcpy(iv, template[i].iv, iv_len); |
514 | else | 640 | else |
@@ -617,7 +743,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
617 | j++; | 743 | j++; |
618 | 744 | ||
619 | if (template[i].iv) | 745 | if (template[i].iv) |
620 | memcpy(iv, template[i].iv, MAX_IVLEN); | 746 | memcpy(iv, template[i].iv, iv_len); |
621 | else | 747 | else |
622 | memset(iv, 0, MAX_IVLEN); | 748 | memset(iv, 0, MAX_IVLEN); |
623 | 749 | ||
@@ -1293,183 +1419,6 @@ out: | |||
1293 | return ret; | 1419 | return ret; |
1294 | } | 1420 | } |
1295 | 1421 | ||
1296 | static int test_pcomp(struct crypto_pcomp *tfm, | ||
1297 | struct pcomp_testvec *ctemplate, | ||
1298 | struct pcomp_testvec *dtemplate, int ctcount, | ||
1299 | int dtcount) | ||
1300 | { | ||
1301 | const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm)); | ||
1302 | unsigned int i; | ||
1303 | char result[COMP_BUF_SIZE]; | ||
1304 | int res; | ||
1305 | |||
1306 | for (i = 0; i < ctcount; i++) { | ||
1307 | struct comp_request req; | ||
1308 | unsigned int produced = 0; | ||
1309 | |||
1310 | res = crypto_compress_setup(tfm, ctemplate[i].params, | ||
1311 | ctemplate[i].paramsize); | ||
1312 | if (res) { | ||
1313 | pr_err("alg: pcomp: compression setup failed on test " | ||
1314 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1315 | return res; | ||
1316 | } | ||
1317 | |||
1318 | res = crypto_compress_init(tfm); | ||
1319 | if (res) { | ||
1320 | pr_err("alg: pcomp: compression init failed on test " | ||
1321 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1322 | return res; | ||
1323 | } | ||
1324 | |||
1325 | memset(result, 0, sizeof(result)); | ||
1326 | |||
1327 | req.next_in = ctemplate[i].input; | ||
1328 | req.avail_in = ctemplate[i].inlen / 2; | ||
1329 | req.next_out = result; | ||
1330 | req.avail_out = ctemplate[i].outlen / 2; | ||
1331 | |||
1332 | res = crypto_compress_update(tfm, &req); | ||
1333 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { | ||
1334 | pr_err("alg: pcomp: compression update failed on test " | ||
1335 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1336 | return res; | ||
1337 | } | ||
1338 | if (res > 0) | ||
1339 | produced += res; | ||
1340 | |||
1341 | /* Add remaining input data */ | ||
1342 | req.avail_in += (ctemplate[i].inlen + 1) / 2; | ||
1343 | |||
1344 | res = crypto_compress_update(tfm, &req); | ||
1345 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { | ||
1346 | pr_err("alg: pcomp: compression update failed on test " | ||
1347 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1348 | return res; | ||
1349 | } | ||
1350 | if (res > 0) | ||
1351 | produced += res; | ||
1352 | |||
1353 | /* Provide remaining output space */ | ||
1354 | req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2; | ||
1355 | |||
1356 | res = crypto_compress_final(tfm, &req); | ||
1357 | if (res < 0) { | ||
1358 | pr_err("alg: pcomp: compression final failed on test " | ||
1359 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1360 | return res; | ||
1361 | } | ||
1362 | produced += res; | ||
1363 | |||
1364 | if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) { | ||
1365 | pr_err("alg: comp: Compression test %d failed for %s: " | ||
1366 | "output len = %d (expected %d)\n", i + 1, algo, | ||
1367 | COMP_BUF_SIZE - req.avail_out, | ||
1368 | ctemplate[i].outlen); | ||
1369 | return -EINVAL; | ||
1370 | } | ||
1371 | |||
1372 | if (produced != ctemplate[i].outlen) { | ||
1373 | pr_err("alg: comp: Compression test %d failed for %s: " | ||
1374 | "returned len = %u (expected %d)\n", i + 1, | ||
1375 | algo, produced, ctemplate[i].outlen); | ||
1376 | return -EINVAL; | ||
1377 | } | ||
1378 | |||
1379 | if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) { | ||
1380 | pr_err("alg: pcomp: Compression test %d failed for " | ||
1381 | "%s\n", i + 1, algo); | ||
1382 | hexdump(result, ctemplate[i].outlen); | ||
1383 | return -EINVAL; | ||
1384 | } | ||
1385 | } | ||
1386 | |||
1387 | for (i = 0; i < dtcount; i++) { | ||
1388 | struct comp_request req; | ||
1389 | unsigned int produced = 0; | ||
1390 | |||
1391 | res = crypto_decompress_setup(tfm, dtemplate[i].params, | ||
1392 | dtemplate[i].paramsize); | ||
1393 | if (res) { | ||
1394 | pr_err("alg: pcomp: decompression setup failed on " | ||
1395 | "test %d for %s: error=%d\n", i + 1, algo, res); | ||
1396 | return res; | ||
1397 | } | ||
1398 | |||
1399 | res = crypto_decompress_init(tfm); | ||
1400 | if (res) { | ||
1401 | pr_err("alg: pcomp: decompression init failed on test " | ||
1402 | "%d for %s: error=%d\n", i + 1, algo, res); | ||
1403 | return res; | ||
1404 | } | ||
1405 | |||
1406 | memset(result, 0, sizeof(result)); | ||
1407 | |||
1408 | req.next_in = dtemplate[i].input; | ||
1409 | req.avail_in = dtemplate[i].inlen / 2; | ||
1410 | req.next_out = result; | ||
1411 | req.avail_out = dtemplate[i].outlen / 2; | ||
1412 | |||
1413 | res = crypto_decompress_update(tfm, &req); | ||
1414 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { | ||
1415 | pr_err("alg: pcomp: decompression update failed on " | ||
1416 | "test %d for %s: error=%d\n", i + 1, algo, res); | ||
1417 | return res; | ||
1418 | } | ||
1419 | if (res > 0) | ||
1420 | produced += res; | ||
1421 | |||
1422 | /* Add remaining input data */ | ||
1423 | req.avail_in += (dtemplate[i].inlen + 1) / 2; | ||
1424 | |||
1425 | res = crypto_decompress_update(tfm, &req); | ||
1426 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { | ||
1427 | pr_err("alg: pcomp: decompression update failed on " | ||
1428 | "test %d for %s: error=%d\n", i + 1, algo, res); | ||
1429 | return res; | ||
1430 | } | ||
1431 | if (res > 0) | ||
1432 | produced += res; | ||
1433 | |||
1434 | /* Provide remaining output space */ | ||
1435 | req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2; | ||
1436 | |||
1437 | res = crypto_decompress_final(tfm, &req); | ||
1438 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { | ||
1439 | pr_err("alg: pcomp: decompression final failed on " | ||
1440 | "test %d for %s: error=%d\n", i + 1, algo, res); | ||
1441 | return res; | ||
1442 | } | ||
1443 | if (res > 0) | ||
1444 | produced += res; | ||
1445 | |||
1446 | if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) { | ||
1447 | pr_err("alg: comp: Decompression test %d failed for " | ||
1448 | "%s: output len = %d (expected %d)\n", i + 1, | ||
1449 | algo, COMP_BUF_SIZE - req.avail_out, | ||
1450 | dtemplate[i].outlen); | ||
1451 | return -EINVAL; | ||
1452 | } | ||
1453 | |||
1454 | if (produced != dtemplate[i].outlen) { | ||
1455 | pr_err("alg: comp: Decompression test %d failed for " | ||
1456 | "%s: returned len = %u (expected %d)\n", i + 1, | ||
1457 | algo, produced, dtemplate[i].outlen); | ||
1458 | return -EINVAL; | ||
1459 | } | ||
1460 | |||
1461 | if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) { | ||
1462 | pr_err("alg: pcomp: Decompression test %d failed for " | ||
1463 | "%s\n", i + 1, algo); | ||
1464 | hexdump(result, dtemplate[i].outlen); | ||
1465 | return -EINVAL; | ||
1466 | } | ||
1467 | } | ||
1468 | |||
1469 | return 0; | ||
1470 | } | ||
1471 | |||
1472 | |||
1473 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | 1422 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, |
1474 | unsigned int tcount) | 1423 | unsigned int tcount) |
1475 | { | 1424 | { |
@@ -1640,28 +1589,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, | |||
1640 | return err; | 1589 | return err; |
1641 | } | 1590 | } |
1642 | 1591 | ||
1643 | static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver, | ||
1644 | u32 type, u32 mask) | ||
1645 | { | ||
1646 | struct crypto_pcomp *tfm; | ||
1647 | int err; | ||
1648 | |||
1649 | tfm = crypto_alloc_pcomp(driver, type, mask); | ||
1650 | if (IS_ERR(tfm)) { | ||
1651 | pr_err("alg: pcomp: Failed to load transform for %s: %ld\n", | ||
1652 | driver, PTR_ERR(tfm)); | ||
1653 | return PTR_ERR(tfm); | ||
1654 | } | ||
1655 | |||
1656 | err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs, | ||
1657 | desc->suite.pcomp.decomp.vecs, | ||
1658 | desc->suite.pcomp.comp.count, | ||
1659 | desc->suite.pcomp.decomp.count); | ||
1660 | |||
1661 | crypto_free_pcomp(tfm); | ||
1662 | return err; | ||
1663 | } | ||
1664 | |||
1665 | static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, | 1592 | static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, |
1666 | u32 type, u32 mask) | 1593 | u32 type, u32 mask) |
1667 | { | 1594 | { |
@@ -2081,7 +2008,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2081 | }, { | 2008 | }, { |
2082 | .alg = "ansi_cprng", | 2009 | .alg = "ansi_cprng", |
2083 | .test = alg_test_cprng, | 2010 | .test = alg_test_cprng, |
2084 | .fips_allowed = 1, | ||
2085 | .suite = { | 2011 | .suite = { |
2086 | .cprng = { | 2012 | .cprng = { |
2087 | .vecs = ansi_cprng_aes_tv_template, | 2013 | .vecs = ansi_cprng_aes_tv_template, |
@@ -2132,6 +2058,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2132 | }, { | 2058 | }, { |
2133 | .alg = "authenc(hmac(sha1),cbc(des3_ede))", | 2059 | .alg = "authenc(hmac(sha1),cbc(des3_ede))", |
2134 | .test = alg_test_aead, | 2060 | .test = alg_test_aead, |
2061 | .fips_allowed = 1, | ||
2135 | .suite = { | 2062 | .suite = { |
2136 | .aead = { | 2063 | .aead = { |
2137 | .enc = { | 2064 | .enc = { |
@@ -2143,6 +2070,10 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2143 | } | 2070 | } |
2144 | } | 2071 | } |
2145 | }, { | 2072 | }, { |
2073 | .alg = "authenc(hmac(sha1),ctr(aes))", | ||
2074 | .test = alg_test_null, | ||
2075 | .fips_allowed = 1, | ||
2076 | }, { | ||
2146 | .alg = "authenc(hmac(sha1),ecb(cipher_null))", | 2077 | .alg = "authenc(hmac(sha1),ecb(cipher_null))", |
2147 | .test = alg_test_aead, | 2078 | .test = alg_test_aead, |
2148 | .suite = { | 2079 | .suite = { |
@@ -2162,6 +2093,10 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2162 | } | 2093 | } |
2163 | } | 2094 | } |
2164 | }, { | 2095 | }, { |
2096 | .alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))", | ||
2097 | .test = alg_test_null, | ||
2098 | .fips_allowed = 1, | ||
2099 | }, { | ||
2165 | .alg = "authenc(hmac(sha224),cbc(des))", | 2100 | .alg = "authenc(hmac(sha224),cbc(des))", |
2166 | .test = alg_test_aead, | 2101 | .test = alg_test_aead, |
2167 | .suite = { | 2102 | .suite = { |
@@ -2177,6 +2112,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2177 | }, { | 2112 | }, { |
2178 | .alg = "authenc(hmac(sha224),cbc(des3_ede))", | 2113 | .alg = "authenc(hmac(sha224),cbc(des3_ede))", |
2179 | .test = alg_test_aead, | 2114 | .test = alg_test_aead, |
2115 | .fips_allowed = 1, | ||
2180 | .suite = { | 2116 | .suite = { |
2181 | .aead = { | 2117 | .aead = { |
2182 | .enc = { | 2118 | .enc = { |
@@ -2190,6 +2126,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2190 | }, { | 2126 | }, { |
2191 | .alg = "authenc(hmac(sha256),cbc(aes))", | 2127 | .alg = "authenc(hmac(sha256),cbc(aes))", |
2192 | .test = alg_test_aead, | 2128 | .test = alg_test_aead, |
2129 | .fips_allowed = 1, | ||
2193 | .suite = { | 2130 | .suite = { |
2194 | .aead = { | 2131 | .aead = { |
2195 | .enc = { | 2132 | .enc = { |
@@ -2216,6 +2153,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2216 | }, { | 2153 | }, { |
2217 | .alg = "authenc(hmac(sha256),cbc(des3_ede))", | 2154 | .alg = "authenc(hmac(sha256),cbc(des3_ede))", |
2218 | .test = alg_test_aead, | 2155 | .test = alg_test_aead, |
2156 | .fips_allowed = 1, | ||
2219 | .suite = { | 2157 | .suite = { |
2220 | .aead = { | 2158 | .aead = { |
2221 | .enc = { | 2159 | .enc = { |
@@ -2227,6 +2165,14 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2227 | } | 2165 | } |
2228 | } | 2166 | } |
2229 | }, { | 2167 | }, { |
2168 | .alg = "authenc(hmac(sha256),ctr(aes))", | ||
2169 | .test = alg_test_null, | ||
2170 | .fips_allowed = 1, | ||
2171 | }, { | ||
2172 | .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))", | ||
2173 | .test = alg_test_null, | ||
2174 | .fips_allowed = 1, | ||
2175 | }, { | ||
2230 | .alg = "authenc(hmac(sha384),cbc(des))", | 2176 | .alg = "authenc(hmac(sha384),cbc(des))", |
2231 | .test = alg_test_aead, | 2177 | .test = alg_test_aead, |
2232 | .suite = { | 2178 | .suite = { |
@@ -2242,6 +2188,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2242 | }, { | 2188 | }, { |
2243 | .alg = "authenc(hmac(sha384),cbc(des3_ede))", | 2189 | .alg = "authenc(hmac(sha384),cbc(des3_ede))", |
2244 | .test = alg_test_aead, | 2190 | .test = alg_test_aead, |
2191 | .fips_allowed = 1, | ||
2245 | .suite = { | 2192 | .suite = { |
2246 | .aead = { | 2193 | .aead = { |
2247 | .enc = { | 2194 | .enc = { |
@@ -2253,7 +2200,16 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2253 | } | 2200 | } |
2254 | } | 2201 | } |
2255 | }, { | 2202 | }, { |
2203 | .alg = "authenc(hmac(sha384),ctr(aes))", | ||
2204 | .test = alg_test_null, | ||
2205 | .fips_allowed = 1, | ||
2206 | }, { | ||
2207 | .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))", | ||
2208 | .test = alg_test_null, | ||
2209 | .fips_allowed = 1, | ||
2210 | }, { | ||
2256 | .alg = "authenc(hmac(sha512),cbc(aes))", | 2211 | .alg = "authenc(hmac(sha512),cbc(aes))", |
2212 | .fips_allowed = 1, | ||
2257 | .test = alg_test_aead, | 2213 | .test = alg_test_aead, |
2258 | .suite = { | 2214 | .suite = { |
2259 | .aead = { | 2215 | .aead = { |
@@ -2281,6 +2237,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2281 | }, { | 2237 | }, { |
2282 | .alg = "authenc(hmac(sha512),cbc(des3_ede))", | 2238 | .alg = "authenc(hmac(sha512),cbc(des3_ede))", |
2283 | .test = alg_test_aead, | 2239 | .test = alg_test_aead, |
2240 | .fips_allowed = 1, | ||
2284 | .suite = { | 2241 | .suite = { |
2285 | .aead = { | 2242 | .aead = { |
2286 | .enc = { | 2243 | .enc = { |
@@ -2292,6 +2249,14 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2292 | } | 2249 | } |
2293 | } | 2250 | } |
2294 | }, { | 2251 | }, { |
2252 | .alg = "authenc(hmac(sha512),ctr(aes))", | ||
2253 | .test = alg_test_null, | ||
2254 | .fips_allowed = 1, | ||
2255 | }, { | ||
2256 | .alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))", | ||
2257 | .test = alg_test_null, | ||
2258 | .fips_allowed = 1, | ||
2259 | }, { | ||
2295 | .alg = "cbc(aes)", | 2260 | .alg = "cbc(aes)", |
2296 | .test = alg_test_skcipher, | 2261 | .test = alg_test_skcipher, |
2297 | .fips_allowed = 1, | 2262 | .fips_allowed = 1, |
@@ -3840,22 +3805,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3840 | } | 3805 | } |
3841 | } | 3806 | } |
3842 | } | 3807 | } |
3843 | }, { | ||
3844 | .alg = "zlib", | ||
3845 | .test = alg_test_pcomp, | ||
3846 | .fips_allowed = 1, | ||
3847 | .suite = { | ||
3848 | .pcomp = { | ||
3849 | .comp = { | ||
3850 | .vecs = zlib_comp_tv_template, | ||
3851 | .count = ZLIB_COMP_TEST_VECTORS | ||
3852 | }, | ||
3853 | .decomp = { | ||
3854 | .vecs = zlib_decomp_tv_template, | ||
3855 | .count = ZLIB_DECOMP_TEST_VECTORS | ||
3856 | } | ||
3857 | } | ||
3858 | } | ||
3859 | } | 3808 | } |
3860 | }; | 3809 | }; |
3861 | 3810 | ||
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index da0a8fd765f4..487ec880e889 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -25,9 +25,6 @@ | |||
25 | #define _CRYPTO_TESTMGR_H | 25 | #define _CRYPTO_TESTMGR_H |
26 | 26 | ||
27 | #include <linux/netlink.h> | 27 | #include <linux/netlink.h> |
28 | #include <linux/zlib.h> | ||
29 | |||
30 | #include <crypto/compress.h> | ||
31 | 28 | ||
32 | #define MAX_DIGEST_SIZE 64 | 29 | #define MAX_DIGEST_SIZE 64 |
33 | #define MAX_TAP 8 | 30 | #define MAX_TAP 8 |
@@ -32268,14 +32265,6 @@ struct comp_testvec { | |||
32268 | char output[COMP_BUF_SIZE]; | 32265 | char output[COMP_BUF_SIZE]; |
32269 | }; | 32266 | }; |
32270 | 32267 | ||
32271 | struct pcomp_testvec { | ||
32272 | const void *params; | ||
32273 | unsigned int paramsize; | ||
32274 | int inlen, outlen; | ||
32275 | char input[COMP_BUF_SIZE]; | ||
32276 | char output[COMP_BUF_SIZE]; | ||
32277 | }; | ||
32278 | |||
32279 | /* | 32268 | /* |
32280 | * Deflate test vectors (null-terminated strings). | 32269 | * Deflate test vectors (null-terminated strings). |
32281 | * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. | 32270 | * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. |
@@ -32356,139 +32345,6 @@ static struct comp_testvec deflate_decomp_tv_template[] = { | |||
32356 | }, | 32345 | }, |
32357 | }; | 32346 | }; |
32358 | 32347 | ||
32359 | #define ZLIB_COMP_TEST_VECTORS 2 | ||
32360 | #define ZLIB_DECOMP_TEST_VECTORS 2 | ||
32361 | |||
32362 | static const struct { | ||
32363 | struct nlattr nla; | ||
32364 | int val; | ||
32365 | } deflate_comp_params[] = { | ||
32366 | { | ||
32367 | .nla = { | ||
32368 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32369 | .nla_type = ZLIB_COMP_LEVEL, | ||
32370 | }, | ||
32371 | .val = Z_DEFAULT_COMPRESSION, | ||
32372 | }, { | ||
32373 | .nla = { | ||
32374 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32375 | .nla_type = ZLIB_COMP_METHOD, | ||
32376 | }, | ||
32377 | .val = Z_DEFLATED, | ||
32378 | }, { | ||
32379 | .nla = { | ||
32380 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32381 | .nla_type = ZLIB_COMP_WINDOWBITS, | ||
32382 | }, | ||
32383 | .val = -11, | ||
32384 | }, { | ||
32385 | .nla = { | ||
32386 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32387 | .nla_type = ZLIB_COMP_MEMLEVEL, | ||
32388 | }, | ||
32389 | .val = MAX_MEM_LEVEL, | ||
32390 | }, { | ||
32391 | .nla = { | ||
32392 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32393 | .nla_type = ZLIB_COMP_STRATEGY, | ||
32394 | }, | ||
32395 | .val = Z_DEFAULT_STRATEGY, | ||
32396 | } | ||
32397 | }; | ||
32398 | |||
32399 | static const struct { | ||
32400 | struct nlattr nla; | ||
32401 | int val; | ||
32402 | } deflate_decomp_params[] = { | ||
32403 | { | ||
32404 | .nla = { | ||
32405 | .nla_len = NLA_HDRLEN + sizeof(int), | ||
32406 | .nla_type = ZLIB_DECOMP_WINDOWBITS, | ||
32407 | }, | ||
32408 | .val = -11, | ||
32409 | } | ||
32410 | }; | ||
32411 | |||
32412 | static struct pcomp_testvec zlib_comp_tv_template[] = { | ||
32413 | { | ||
32414 | .params = &deflate_comp_params, | ||
32415 | .paramsize = sizeof(deflate_comp_params), | ||
32416 | .inlen = 70, | ||
32417 | .outlen = 38, | ||
32418 | .input = "Join us now and share the software " | ||
32419 | "Join us now and share the software ", | ||
32420 | .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56" | ||
32421 | "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51" | ||
32422 | "\x28\xce\x48\x2c\x4a\x55\x28\xc9" | ||
32423 | "\x48\x55\x28\xce\x4f\x2b\x29\x07" | ||
32424 | "\x71\xbc\x08\x2b\x01\x00", | ||
32425 | }, { | ||
32426 | .params = &deflate_comp_params, | ||
32427 | .paramsize = sizeof(deflate_comp_params), | ||
32428 | .inlen = 191, | ||
32429 | .outlen = 122, | ||
32430 | .input = "This document describes a compression method based on the DEFLATE" | ||
32431 | "compression algorithm. This document defines the application of " | ||
32432 | "the DEFLATE algorithm to the IP Payload Compression Protocol.", | ||
32433 | .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04" | ||
32434 | "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09" | ||
32435 | "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8" | ||
32436 | "\x24\xdb\x67\xd9\x47\xc1\xef\x49" | ||
32437 | "\x68\x12\x51\xae\x76\x67\xd6\x27" | ||
32438 | "\x19\x88\x1a\xde\x85\xab\x21\xf2" | ||
32439 | "\x08\x5d\x16\x1e\x20\x04\x2d\xad" | ||
32440 | "\xf3\x18\xa2\x15\x85\x2d\x69\xc4" | ||
32441 | "\x42\x83\x23\xb6\x6c\x89\x71\x9b" | ||
32442 | "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f" | ||
32443 | "\xed\x62\xa9\x4c\x80\xff\x13\xaf" | ||
32444 | "\x52\x37\xed\x0e\x52\x6b\x59\x02" | ||
32445 | "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98" | ||
32446 | "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a" | ||
32447 | "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79" | ||
32448 | "\xfa\x02", | ||
32449 | }, | ||
32450 | }; | ||
32451 | |||
32452 | static struct pcomp_testvec zlib_decomp_tv_template[] = { | ||
32453 | { | ||
32454 | .params = &deflate_decomp_params, | ||
32455 | .paramsize = sizeof(deflate_decomp_params), | ||
32456 | .inlen = 122, | ||
32457 | .outlen = 191, | ||
32458 | .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04" | ||
32459 | "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09" | ||
32460 | "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8" | ||
32461 | "\x24\xdb\x67\xd9\x47\xc1\xef\x49" | ||
32462 | "\x68\x12\x51\xae\x76\x67\xd6\x27" | ||
32463 | "\x19\x88\x1a\xde\x85\xab\x21\xf2" | ||
32464 | "\x08\x5d\x16\x1e\x20\x04\x2d\xad" | ||
32465 | "\xf3\x18\xa2\x15\x85\x2d\x69\xc4" | ||
32466 | "\x42\x83\x23\xb6\x6c\x89\x71\x9b" | ||
32467 | "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f" | ||
32468 | "\xed\x62\xa9\x4c\x80\xff\x13\xaf" | ||
32469 | "\x52\x37\xed\x0e\x52\x6b\x59\x02" | ||
32470 | "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98" | ||
32471 | "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a" | ||
32472 | "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79" | ||
32473 | "\xfa\x02", | ||
32474 | .output = "This document describes a compression method based on the DEFLATE" | ||
32475 | "compression algorithm. This document defines the application of " | ||
32476 | "the DEFLATE algorithm to the IP Payload Compression Protocol.", | ||
32477 | }, { | ||
32478 | .params = &deflate_decomp_params, | ||
32479 | .paramsize = sizeof(deflate_decomp_params), | ||
32480 | .inlen = 38, | ||
32481 | .outlen = 70, | ||
32482 | .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56" | ||
32483 | "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51" | ||
32484 | "\x28\xce\x48\x2c\x4a\x55\x28\xc9" | ||
32485 | "\x48\x55\x28\xce\x4f\x2b\x29\x07" | ||
32486 | "\x71\xbc\x08\x2b\x01\x00", | ||
32487 | .output = "Join us now and share the software " | ||
32488 | "Join us now and share the software ", | ||
32489 | }, | ||
32490 | }; | ||
32491 | |||
32492 | /* | 32348 | /* |
32493 | * LZO test vectors (null-terminated strings). | 32349 | * LZO test vectors (null-terminated strings). |
32494 | */ | 32350 | */ |
diff --git a/crypto/xts.c b/crypto/xts.c index f6fd43f100c8..26ba5833b994 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -35,16 +35,11 @@ static int setkey(struct crypto_tfm *parent, const u8 *key, | |||
35 | { | 35 | { |
36 | struct priv *ctx = crypto_tfm_ctx(parent); | 36 | struct priv *ctx = crypto_tfm_ctx(parent); |
37 | struct crypto_cipher *child = ctx->tweak; | 37 | struct crypto_cipher *child = ctx->tweak; |
38 | u32 *flags = &parent->crt_flags; | ||
39 | int err; | 38 | int err; |
40 | 39 | ||
41 | /* key consists of keys of equal size concatenated, therefore | 40 | err = xts_check_key(parent, key, keylen); |
42 | * the length must be even */ | 41 | if (err) |
43 | if (keylen % 2) { | 42 | return err; |
44 | /* tell the user why there was an error */ | ||
45 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | 43 | ||
49 | /* we need two cipher instances: one to compute the initial 'tweak' | 44 | /* we need two cipher instances: one to compute the initial 'tweak' |
50 | * by encrypting the IV (usually the 'plain' iv) and the other | 45 | * by encrypting the IV (usually the 'plain' iv) and the other |
diff --git a/crypto/zlib.c b/crypto/zlib.c deleted file mode 100644 index d51a30a29e42..000000000000 --- a/crypto/zlib.c +++ /dev/null | |||
@@ -1,381 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Zlib algorithm | ||
5 | * | ||
6 | * Copyright 2008 Sony Corporation | ||
7 | * | ||
8 | * Based on deflate.c, which is | ||
9 | * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the Free | ||
13 | * Software Foundation; either version 2 of the License, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * FIXME: deflate transforms will require up to a total of about 436k of kernel | ||
17 | * memory on i386 (390k for compression, the rest for decompression), as the | ||
18 | * current zlib kernel code uses a worst case pre-allocation system by default. | ||
19 | * This needs to be fixed so that the amount of memory required is properly | ||
20 | * related to the winbits and memlevel parameters. | ||
21 | */ | ||
22 | |||
23 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
24 | |||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/zlib.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/mm.h> | ||
31 | #include <linux/net.h> | ||
32 | |||
33 | #include <crypto/internal/compress.h> | ||
34 | |||
35 | #include <net/netlink.h> | ||
36 | |||
37 | |||
38 | struct zlib_ctx { | ||
39 | struct z_stream_s comp_stream; | ||
40 | struct z_stream_s decomp_stream; | ||
41 | int decomp_windowBits; | ||
42 | }; | ||
43 | |||
44 | |||
45 | static void zlib_comp_exit(struct zlib_ctx *ctx) | ||
46 | { | ||
47 | struct z_stream_s *stream = &ctx->comp_stream; | ||
48 | |||
49 | if (stream->workspace) { | ||
50 | zlib_deflateEnd(stream); | ||
51 | vfree(stream->workspace); | ||
52 | stream->workspace = NULL; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static void zlib_decomp_exit(struct zlib_ctx *ctx) | ||
57 | { | ||
58 | struct z_stream_s *stream = &ctx->decomp_stream; | ||
59 | |||
60 | if (stream->workspace) { | ||
61 | zlib_inflateEnd(stream); | ||
62 | vfree(stream->workspace); | ||
63 | stream->workspace = NULL; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static int zlib_init(struct crypto_tfm *tfm) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void zlib_exit(struct crypto_tfm *tfm) | ||
73 | { | ||
74 | struct zlib_ctx *ctx = crypto_tfm_ctx(tfm); | ||
75 | |||
76 | zlib_comp_exit(ctx); | ||
77 | zlib_decomp_exit(ctx); | ||
78 | } | ||
79 | |||
80 | |||
81 | static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params, | ||
82 | unsigned int len) | ||
83 | { | ||
84 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
85 | struct z_stream_s *stream = &ctx->comp_stream; | ||
86 | struct nlattr *tb[ZLIB_COMP_MAX + 1]; | ||
87 | int window_bits, mem_level; | ||
88 | size_t workspacesize; | ||
89 | int ret; | ||
90 | |||
91 | ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL); | ||
92 | if (ret) | ||
93 | return ret; | ||
94 | |||
95 | zlib_comp_exit(ctx); | ||
96 | |||
97 | window_bits = tb[ZLIB_COMP_WINDOWBITS] | ||
98 | ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) | ||
99 | : MAX_WBITS; | ||
100 | mem_level = tb[ZLIB_COMP_MEMLEVEL] | ||
101 | ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) | ||
102 | : DEF_MEM_LEVEL; | ||
103 | |||
104 | workspacesize = zlib_deflate_workspacesize(window_bits, mem_level); | ||
105 | stream->workspace = vzalloc(workspacesize); | ||
106 | if (!stream->workspace) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | ret = zlib_deflateInit2(stream, | ||
110 | tb[ZLIB_COMP_LEVEL] | ||
111 | ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) | ||
112 | : Z_DEFAULT_COMPRESSION, | ||
113 | tb[ZLIB_COMP_METHOD] | ||
114 | ? nla_get_u32(tb[ZLIB_COMP_METHOD]) | ||
115 | : Z_DEFLATED, | ||
116 | window_bits, | ||
117 | mem_level, | ||
118 | tb[ZLIB_COMP_STRATEGY] | ||
119 | ? nla_get_u32(tb[ZLIB_COMP_STRATEGY]) | ||
120 | : Z_DEFAULT_STRATEGY); | ||
121 | if (ret != Z_OK) { | ||
122 | vfree(stream->workspace); | ||
123 | stream->workspace = NULL; | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static int zlib_compress_init(struct crypto_pcomp *tfm) | ||
131 | { | ||
132 | int ret; | ||
133 | struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
134 | struct z_stream_s *stream = &dctx->comp_stream; | ||
135 | |||
136 | ret = zlib_deflateReset(stream); | ||
137 | if (ret != Z_OK) | ||
138 | return -EINVAL; | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int zlib_compress_update(struct crypto_pcomp *tfm, | ||
144 | struct comp_request *req) | ||
145 | { | ||
146 | int ret; | ||
147 | struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
148 | struct z_stream_s *stream = &dctx->comp_stream; | ||
149 | |||
150 | pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); | ||
151 | stream->next_in = req->next_in; | ||
152 | stream->avail_in = req->avail_in; | ||
153 | stream->next_out = req->next_out; | ||
154 | stream->avail_out = req->avail_out; | ||
155 | |||
156 | ret = zlib_deflate(stream, Z_NO_FLUSH); | ||
157 | switch (ret) { | ||
158 | case Z_OK: | ||
159 | break; | ||
160 | |||
161 | case Z_BUF_ERROR: | ||
162 | pr_debug("zlib_deflate could not make progress\n"); | ||
163 | return -EAGAIN; | ||
164 | |||
165 | default: | ||
166 | pr_debug("zlib_deflate failed %d\n", ret); | ||
167 | return -EINVAL; | ||
168 | } | ||
169 | |||
170 | ret = req->avail_out - stream->avail_out; | ||
171 | pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n", | ||
172 | stream->avail_in, stream->avail_out, | ||
173 | req->avail_in - stream->avail_in, ret); | ||
174 | req->next_in = stream->next_in; | ||
175 | req->avail_in = stream->avail_in; | ||
176 | req->next_out = stream->next_out; | ||
177 | req->avail_out = stream->avail_out; | ||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | static int zlib_compress_final(struct crypto_pcomp *tfm, | ||
182 | struct comp_request *req) | ||
183 | { | ||
184 | int ret; | ||
185 | struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
186 | struct z_stream_s *stream = &dctx->comp_stream; | ||
187 | |||
188 | pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); | ||
189 | stream->next_in = req->next_in; | ||
190 | stream->avail_in = req->avail_in; | ||
191 | stream->next_out = req->next_out; | ||
192 | stream->avail_out = req->avail_out; | ||
193 | |||
194 | ret = zlib_deflate(stream, Z_FINISH); | ||
195 | if (ret != Z_STREAM_END) { | ||
196 | pr_debug("zlib_deflate failed %d\n", ret); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | |||
200 | ret = req->avail_out - stream->avail_out; | ||
201 | pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n", | ||
202 | stream->avail_in, stream->avail_out, | ||
203 | req->avail_in - stream->avail_in, ret); | ||
204 | req->next_in = stream->next_in; | ||
205 | req->avail_in = stream->avail_in; | ||
206 | req->next_out = stream->next_out; | ||
207 | req->avail_out = stream->avail_out; | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | |||
212 | static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params, | ||
213 | unsigned int len) | ||
214 | { | ||
215 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
216 | struct z_stream_s *stream = &ctx->decomp_stream; | ||
217 | struct nlattr *tb[ZLIB_DECOMP_MAX + 1]; | ||
218 | int ret = 0; | ||
219 | |||
220 | ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL); | ||
221 | if (ret) | ||
222 | return ret; | ||
223 | |||
224 | zlib_decomp_exit(ctx); | ||
225 | |||
226 | ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS] | ||
227 | ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS]) | ||
228 | : DEF_WBITS; | ||
229 | |||
230 | stream->workspace = vzalloc(zlib_inflate_workspacesize()); | ||
231 | if (!stream->workspace) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | ret = zlib_inflateInit2(stream, ctx->decomp_windowBits); | ||
235 | if (ret != Z_OK) { | ||
236 | vfree(stream->workspace); | ||
237 | stream->workspace = NULL; | ||
238 | return -EINVAL; | ||
239 | } | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static int zlib_decompress_init(struct crypto_pcomp *tfm) | ||
245 | { | ||
246 | int ret; | ||
247 | struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
248 | struct z_stream_s *stream = &dctx->decomp_stream; | ||
249 | |||
250 | ret = zlib_inflateReset(stream); | ||
251 | if (ret != Z_OK) | ||
252 | return -EINVAL; | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | static int zlib_decompress_update(struct crypto_pcomp *tfm, | ||
258 | struct comp_request *req) | ||
259 | { | ||
260 | int ret; | ||
261 | struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | ||
262 | struct z_stream_s *stream = &dctx->decomp_stream; | ||
263 | |||
264 | pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); | ||
265 | stream->next_in = req->next_in; | ||
266 | stream->avail_in = req->avail_in; | ||
267 | stream->next_out = req->next_out; | ||
268 | stream->avail_out = req->avail_out; | ||
269 | |||
270 | ret = zlib_inflate(stream, Z_SYNC_FLUSH); | ||
271 | switch (ret) { | ||
272 | case Z_OK: | ||
273 | case Z_STREAM_END: | ||
274 | break; | ||
275 | |||
276 | case Z_BUF_ERROR: | ||
277 | pr_debug("zlib_inflate could not make progress\n"); | ||
278 | return -EAGAIN; | ||
279 | |||
280 | default: | ||
281 | pr_debug("zlib_inflate failed %d\n", ret); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | |||
285 | ret = req->avail_out - stream->avail_out; | ||
286 | pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n", | ||
287 | stream->avail_in, stream->avail_out, | ||
288 | req->avail_in - stream->avail_in, ret); | ||
289 | req->next_in = stream->next_in; | ||
290 | req->avail_in = stream->avail_in; | ||
291 | req->next_out = stream->next_out; | ||
292 | req->avail_out = stream->avail_out; | ||
293 | return ret; | ||
294 | } | ||
295 | |||
/*
 * Finish decompression: drive zlib_inflate() to stream termination and
 * require Z_STREAM_END.
 *
 * On success returns the number of bytes produced into the output
 * buffer and updates req's next_in/avail_in/next_out/avail_out; returns
 * -EINVAL if the stream does not end cleanly.
 */
static int zlib_decompress_final(struct crypto_pcomp *tfm,
				 struct comp_request *req)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->decomp_stream;

	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
	/* Mirror the request state into the zlib stream. */
	stream->next_in = req->next_in;
	stream->avail_in = req->avail_in;
	stream->next_out = req->next_out;
	stream->avail_out = req->avail_out;

	/* Negative windowBits means raw deflate mode (no zlib header). */
	if (dctx->decomp_windowBits < 0) {
		ret = zlib_inflate(stream, Z_SYNC_FLUSH);
		/*
		 * Work around a bug in zlib, which sometimes wants to taste an
		 * extra byte when being used in the (undocumented) raw deflate
		 * mode. (From USAGI).
		 */
		if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
			const void *saved_next_in = stream->next_in;
			u8 zerostuff = 0;

			/*
			 * Feed a single zero byte so inflate can finish, then
			 * restore the caller's input pointer; the stuffed byte
			 * must not be reported as consumed input.
			 */
			stream->next_in = &zerostuff;
			stream->avail_in = 1;
			ret = zlib_inflate(stream, Z_FINISH);
			stream->next_in = saved_next_in;
			stream->avail_in = 0;
		}
	} else
		ret = zlib_inflate(stream, Z_FINISH);
	if (ret != Z_STREAM_END) {
		pr_debug("zlib_inflate failed %d\n", ret);
		return -EINVAL;
	}

	/* Bytes written to the caller's output buffer. */
	ret = req->avail_out - stream->avail_out;
	pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
		 stream->avail_in, stream->avail_out,
		 req->avail_in - stream->avail_in, ret);
	/* Propagate the final stream positions back to the request. */
	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
	return ret;
}
343 | |||
344 | |||
/*
 * Partial (streaming) compression algorithm descriptor for "zlib":
 * wires the per-direction setup/init/update/final callbacks into the
 * pcomp API.  Per-transform state lives in struct zlib_ctx, set up and
 * torn down by zlib_init()/zlib_exit().
 */
static struct pcomp_alg zlib_alg = {
	.compress_setup		= zlib_compress_setup,
	.compress_init		= zlib_compress_init,
	.compress_update	= zlib_compress_update,
	.compress_final		= zlib_compress_final,
	.decompress_setup	= zlib_decompress_setup,
	.decompress_init	= zlib_decompress_init,
	.decompress_update	= zlib_decompress_update,
	.decompress_final	= zlib_decompress_final,

	.base			= {
		.cra_name	= "zlib",
		.cra_flags	= CRYPTO_ALG_TYPE_PCOMPRESS,
		.cra_ctxsize	= sizeof(struct zlib_ctx),
		.cra_module	= THIS_MODULE,
		.cra_init	= zlib_init,
		.cra_exit	= zlib_exit,
	}
};
364 | |||
/* Module entry point: register the "zlib" pcomp algorithm. */
static int __init zlib_mod_init(void)
{
	return crypto_register_pcomp(&zlib_alg);
}
369 | |||
/* Module exit point: unregister the "zlib" pcomp algorithm. */
static void __exit zlib_mod_fini(void)
{
	crypto_unregister_pcomp(&zlib_alg);
}
374 | |||
module_init(zlib_mod_init);
module_exit(zlib_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zlib Compression Algorithm");
MODULE_AUTHOR("Sony Corporation");
/* Allows this module to be auto-loaded by crypto API requests for "zlib". */
MODULE_ALIAS_CRYPTO("zlib");