author    Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 14:04:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 14:04:34 -0400
commit    562f477a54478002ddfbb5b85627c009ca41e71d (patch)
tree      52384cc554ae64cc7a26878d64d606f40fd703ce /crypto
parent    ada19a31a90b4f46c040c25ef4ef8ffc203c7fc6 (diff)
parent    949abe574739848b1e68271fbac86c3cb4506aad (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (29 commits)
  crypto: sha512-s390 - Add missing block size
  hwrng: timeriomem - Breaks an allyesconfig build on s390:
  nlattr: Fix build error with NET off
  crypto: testmgr - add zlib test
  crypto: zlib - New zlib crypto module, using pcomp
  crypto: testmgr - Add support for the pcomp interface
  crypto: compress - Add pcomp interface
  netlink: Move netlink attribute parsing support to lib
  crypto: Fix dead links
  hwrng: timeriomem - New driver
  crypto: chainiv - Use kcrypto_wq instead of keventd_wq
  crypto: cryptd - Per-CPU thread implementation based on kcrypto_wq
  crypto: api - Use dedicated workqueue for crypto subsystem
  crypto: testmgr - Test skciphers with no IVs
  crypto: aead - Avoid infinite loop when nivaead fails selftest
  crypto: skcipher - Avoid infinite loop when cipher fails selftest
  crypto: api - Fix crypto_alloc_tfm/create_create_tfm return convention
  crypto: api - crypto_alg_mod_lookup either tested or untested
  crypto: amcc - Add crypt4xx driver
  crypto: ansi_cprng - Add maintainer
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig            44
-rw-r--r--  crypto/Makefile            5
-rw-r--r--  crypto/ablkcipher.c       19
-rw-r--r--  crypto/aead.c             16
-rw-r--r--  crypto/algboss.c          20
-rw-r--r--  crypto/ansi_cprng.c       17
-rw-r--r--  crypto/api.c              17
-rw-r--r--  crypto/blkcipher.c         2
-rw-r--r--  crypto/chainiv.c           3
-rw-r--r--  crypto/cryptd.c          237
-rw-r--r--  crypto/crypto_wq.c        38
-rw-r--r--  crypto/gf128mul.c          2
-rw-r--r--  crypto/internal.h          6
-rw-r--r--  crypto/pcompress.c        97
-rw-r--r--  crypto/sha256_generic.c    2
-rw-r--r--  crypto/shash.c            20
-rw-r--r--  crypto/tcrypt.c            6
-rw-r--r--  crypto/testmgr.c         198
-rw-r--r--  crypto/testmgr.h         147
-rw-r--r--  crypto/zlib.c            378
20 files changed, 1130 insertions(+), 144 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8dde4fcf99c9..74d0e622a515 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -56,6 +56,7 @@ config CRYPTO_BLKCIPHER2
         tristate
         select CRYPTO_ALGAPI2
         select CRYPTO_RNG2
+        select CRYPTO_WORKQUEUE
 
 config CRYPTO_HASH
         tristate
@@ -75,6 +76,10 @@ config CRYPTO_RNG2
         tristate
         select CRYPTO_ALGAPI2
 
+config CRYPTO_PCOMP
+        tristate
+        select CRYPTO_ALGAPI2
+
 config CRYPTO_MANAGER
         tristate "Cryptographic algorithm manager"
         select CRYPTO_MANAGER2
@@ -87,6 +92,7 @@ config CRYPTO_MANAGER2
         select CRYPTO_AEAD2
         select CRYPTO_HASH2
         select CRYPTO_BLKCIPHER2
+        select CRYPTO_PCOMP
 
 config CRYPTO_GF128MUL
         tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -106,11 +112,15 @@ config CRYPTO_NULL
         help
           These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_WORKQUEUE
+        tristate
+
 config CRYPTO_CRYPTD
         tristate "Software async crypto daemon"
         select CRYPTO_BLKCIPHER
         select CRYPTO_HASH
         select CRYPTO_MANAGER
+        select CRYPTO_WORKQUEUE
         help
           This is a generic software asynchronous crypto daemon that
           converts an arbitrary synchronous software crypto algorithm
@@ -470,6 +480,31 @@ config CRYPTO_AES_X86_64
 
           See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_NI_INTEL
+        tristate "AES cipher algorithms (AES-NI)"
+        depends on (X86 || UML_X86) && 64BIT
+        select CRYPTO_AES_X86_64
+        select CRYPTO_CRYPTD
+        select CRYPTO_ALGAPI
+        help
+          Use Intel AES-NI instructions for AES algorithm.
+
+          AES cipher algorithms (FIPS-197). AES uses the Rijndael
+          algorithm.
+
+          Rijndael appears to be consistently a very good performer in
+          both hardware and software across a wide range of computing
+          environments regardless of its use in feedback or non-feedback
+          modes. Its key setup time is excellent, and its key agility is
+          good. Rijndael's very low memory requirements make it very well
+          suited for restricted-space environments, in which it also
+          demonstrates excellent performance. Rijndael's operations are
+          among the easiest to defend against power and timing attacks.
+
+          The AES specifies three key sizes: 128, 192 and 256 bits
+
+          See <http://csrc.nist.gov/encryption/aes/> for more information.
+
 config CRYPTO_ANUBIS
         tristate "Anubis cipher algorithm"
         select CRYPTO_ALGAPI
@@ -714,6 +749,15 @@ config CRYPTO_DEFLATE
 
           You will most probably want this if using IPSec.
 
+config CRYPTO_ZLIB
+        tristate "Zlib compression algorithm"
+        select CRYPTO_PCOMP
+        select ZLIB_INFLATE
+        select ZLIB_DEFLATE
+        select NLATTR
+        help
+          This is the zlib algorithm.
+
 config CRYPTO_LZO
         tristate "LZO compression algorithm"
         select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 46b08bf2035f..673d9f7c1bda 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -5,6 +5,8 @@
 obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-objs := api.o cipher.o digest.o compress.o
 
+obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
+
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -25,6 +27,8 @@ crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
+obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+
 cryptomgr-objs := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
 obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 94140b3756fc..e11ce37c7104 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -282,6 +282,25 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
                       alg->cra_ablkcipher.ivsize))
                 return alg;
 
+        crypto_mod_put(alg);
+        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+                                    mask & ~CRYPTO_ALG_TESTED);
+        if (IS_ERR(alg))
+                return alg;
+
+        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+            CRYPTO_ALG_TYPE_GIVCIPHER) {
+                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+                        crypto_mod_put(alg);
+                        alg = ERR_PTR(-ENOENT);
+                }
+                return alg;
+        }
+
+        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+                                             alg->cra_ablkcipher.ivsize));
+
         return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
 
diff --git a/crypto/aead.c b/crypto/aead.c
index 3a6f3f52c7c7..d9aa733db164 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -422,6 +422,22 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
         if (!alg->cra_aead.ivsize)
                 return alg;
 
+        crypto_mod_put(alg);
+        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+                                    mask & ~CRYPTO_ALG_TESTED);
+        if (IS_ERR(alg))
+                return alg;
+
+        if (alg->cra_type == &crypto_aead_type) {
+                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+                        crypto_mod_put(alg);
+                        alg = ERR_PTR(-ENOENT);
+                }
+                return alg;
+        }
+
+        BUG_ON(!alg->cra_aead.ivsize);
+
         return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
 
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 4601e4267c88..6906f92aeac0 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -10,7 +10,7 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
 #include <linux/ctype.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -206,8 +206,7 @@ static int cryptomgr_test(void *data)
         u32 type = param->type;
         int err = 0;
 
-        if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-              CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
+        if (type & CRYPTO_ALG_TESTED)
                 goto skiptest;
 
         err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
@@ -223,6 +222,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 {
         struct task_struct *thread;
         struct crypto_test_param *param;
+        u32 type;
 
         if (!try_module_get(THIS_MODULE))
                 goto err;
@@ -233,7 +233,19 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 
         memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
         memcpy(param->alg, alg->cra_name, sizeof(param->alg));
-        param->type = alg->cra_flags;
+        type = alg->cra_flags;
+
+        /* This piece of crap needs to disappear into per-type test hooks. */
+        if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+               CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+             ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+                                          alg->cra_ablkcipher.ivsize)) ||
+            (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
+             alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+                type |= CRYPTO_ALG_TESTED;
+
+        param->type = type;
 
         thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
         if (IS_ERR(thread))
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 0fac8ffc2fb7..d80ed4c1e009 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -132,9 +132,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
          */
         if (!memcmp(ctx->rand_data, ctx->last_rand_data,
                         DEFAULT_BLK_SZ)) {
+                if (fips_enabled) {
+                        panic("cprng %p Failed repetition check!\n",
+                                ctx);
+                }
+
                 printk(KERN_ERR
                         "ctx %p Failed repetition check!\n",
                         ctx);
+
                 ctx->flags |= PRNG_NEED_RESET;
                 return -EINVAL;
         }
@@ -338,7 +344,16 @@ static int cprng_init(struct crypto_tfm *tfm)
 
         spin_lock_init(&ctx->prng_lock);
 
-        return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL);
+        if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
+                return -EINVAL;
+
+        /*
+         * after allocation, we should always force the user to reset
+         * so they don't inadvertently use the insecure default values
+         * without specifying them intentially
+         */
+        ctx->flags |= PRNG_NEED_RESET;
+        return 0;
 }
 
 static void cprng_exit(struct crypto_tfm *tfm)
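The cprng_init() change above means a freshly allocated ansi_cprng transform now starts with PRNG_NEED_RESET set, so callers must seed it explicitly before asking for output. A minimal sketch of a caller driving it through the generic RNG API follows; the function name, seed handling and buffer size are illustrative assumptions, not part of this patch:

#include <crypto/rng.h>
#include <linux/err.h>

/* Hedged sketch: seed the ANSI CPRNG, then pull random bytes. */
static int example_cprng_user(u8 *seed, unsigned int slen)
{
        struct crypto_rng *rng;
        u8 buf[16];
        int err;

        rng = crypto_alloc_rng("ansi_cprng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* Mandatory after this patch: clears PRNG_NEED_RESET. */
        err = crypto_rng_reset(rng, seed, slen);
        if (!err)
                err = crypto_rng_get_bytes(rng, buf, sizeof(buf));

        crypto_free_rng(rng);
        return err;
}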
diff --git a/crypto/api.c b/crypto/api.c
index 38a2bc02a98c..314dab96840e 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -255,7 +255,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
         struct crypto_alg *larval;
         int ok;
 
-        if (!(mask & CRYPTO_ALG_TESTED)) {
+        if (!((type | mask) & CRYPTO_ALG_TESTED)) {
                 type |= CRYPTO_ALG_TESTED;
                 mask |= CRYPTO_ALG_TESTED;
         }
@@ -464,8 +464,8 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_base);
 
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-                                     const struct crypto_type *frontend)
+void *crypto_create_tfm(struct crypto_alg *alg,
+                        const struct crypto_type *frontend)
 {
         char *mem;
         struct crypto_tfm *tfm = NULL;
@@ -499,9 +499,9 @@ out_free_tfm:
         crypto_shoot_alg(alg);
         kfree(mem);
 out_err:
-        tfm = ERR_PTR(err);
+        mem = ERR_PTR(err);
 out:
-        return tfm;
+        return mem;
 }
 EXPORT_SYMBOL_GPL(crypto_create_tfm);
 
@@ -525,12 +525,11 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
  *
  * In case of error the return value is an error pointer.
  */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
-                                    const struct crypto_type *frontend,
-                                    u32 type, u32 mask)
+void *crypto_alloc_tfm(const char *alg_name,
+                       const struct crypto_type *frontend, u32 type, u32 mask)
 {
         struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
-        struct crypto_tfm *tfm;
+        void *tfm;
         int err;
 
         type &= frontend->maskclear;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index d70a41c002df..90d26c91f4e9 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -521,7 +521,7 @@ static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
         int err;
 
         type = crypto_skcipher_type(type);
-        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
+        mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
 
         alg = crypto_alg_mod_lookup(name, type, mask);
         if (IS_ERR(alg))
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 7c37a497b860..ba200b07449d 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -15,6 +15,7 @@
 
 #include <crypto/internal/skcipher.h>
 #include <crypto/rng.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -133,7 +134,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
                 goto out;
         }
 
-        queued = schedule_work(&ctx->postponed);
+        queued = queue_work(kcrypto_wq, &ctx->postponed);
         BUG_ON(!queued);
 
 out:
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d29e06b350ff..d14b22658d7a 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -12,30 +12,31 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-        spinlock_t lock;
-        struct mutex mutex;
+struct cryptd_cpu_queue {
         struct crypto_queue queue;
-        struct task_struct *task;
+        struct work_struct work;
+};
+
+struct cryptd_queue {
+        struct cryptd_cpu_queue *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
         struct crypto_spawn spawn;
-        struct cryptd_state *state;
+        struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
         crypto_completion_t complete;
 };
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+                             unsigned int max_cpu_qlen)
+{
+        int cpu;
+        struct cryptd_cpu_queue *cpu_queue;
+
+        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+        if (!queue->cpu_queue)
+                return -ENOMEM;
+        for_each_possible_cpu(cpu) {
+                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+        }
+        return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+        int cpu;
+        struct cryptd_cpu_queue *cpu_queue;
+
+        for_each_possible_cpu(cpu) {
+                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+                BUG_ON(cpu_queue->queue.qlen);
+        }
+        free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+                                  struct crypto_async_request *request)
+{
+        int cpu, err;
+        struct cryptd_cpu_queue *cpu_queue;
+
+        cpu = get_cpu();
+        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+        err = crypto_enqueue_request(&cpu_queue->queue, request);
+        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+        put_cpu();
+
+        return err;
+}
+
+/* Called in workqueue context, do one real cryption work (via
+ * req->complete) and reschedule itself if there are more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+        struct cryptd_cpu_queue *cpu_queue;
+        struct crypto_async_request *req, *backlog;
+
+        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+        /* Only handle one request at a time to avoid hogging crypto
+         * workqueue. preempt_disable/enable is used to prevent
+         * being preempted by cryptd_enqueue_request() */
+        preempt_disable();
+        backlog = crypto_get_backlog(&cpu_queue->queue);
+        req = crypto_dequeue_request(&cpu_queue->queue);
+        preempt_enable();
+
+        if (!req)
+                return;
+
+        if (backlog)
+                backlog->complete(backlog, -EINPROGRESS);
+        req->complete(req, 0);
+
+        if (cpu_queue->queue.qlen)
+                queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
         struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
         struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-        return ictx->state;
+        return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
         struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-        struct cryptd_state *state =
-                cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-        int err;
+        struct cryptd_queue *queue;
 
+        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
         rctx->complete = req->base.complete;
         req->base.complete = complete;
 
-        spin_lock_bh(&state->lock);
-        err = ablkcipher_enqueue_request(&state->queue, req);
-        spin_unlock_bh(&state->lock);
-
-        wake_up_process(state->task);
-        return err;
+        return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
         struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-        struct cryptd_state *state = cryptd_get_state(tfm);
-        int active;
-
-        mutex_lock(&state->mutex);
-        active = ablkcipher_tfm_in_queue(&state->queue,
-                                         __crypto_ablkcipher_cast(tfm));
-        mutex_unlock(&state->mutex);
-
-        BUG_ON(active);
 
         crypto_free_blkcipher(ctx->child);
 }
 
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-                                             struct cryptd_state *state)
+                                             struct cryptd_queue *queue)
 {
         struct crypto_instance *inst;
         struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
         if (err)
                 goto out_free_inst;
 
-        ctx->state = state;
+        ctx->queue = queue;
 
         memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
@@ -231,7 +291,7 @@ out_free_inst:
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-        struct rtattr **tb, struct cryptd_state *state)
+        struct rtattr **tb, struct cryptd_queue *queue)
 {
         struct crypto_instance *inst;
         struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
         if (IS_ERR(alg))
                 return ERR_CAST(alg);
 
-        inst = cryptd_alloc_instance(alg, state);
+        inst = cryptd_alloc_instance(alg, queue);
         if (IS_ERR(inst))
                 goto out_put_alg;
 
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 {
         struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-        struct cryptd_state *state = cryptd_get_state(tfm);
-        int active;
-
-        mutex_lock(&state->mutex);
-        active = ahash_tfm_in_queue(&state->queue,
-                                    __crypto_ahash_cast(tfm));
-        mutex_unlock(&state->mutex);
-
-        BUG_ON(active);
 
         crypto_free_hash(ctx->child);
 }
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 {
         struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-        struct cryptd_state *state =
-                cryptd_get_state(crypto_ahash_tfm(tfm));
-        int err;
+        struct cryptd_queue *queue =
+                cryptd_get_queue(crypto_ahash_tfm(tfm));
 
         rctx->complete = req->base.complete;
         req->base.complete = complete;
 
-        spin_lock_bh(&state->lock);
-        err = ahash_enqueue_request(&state->queue, req);
-        spin_unlock_bh(&state->lock);
-
-        wake_up_process(state->task);
-        return err;
+        return cryptd_enqueue_request(queue, &req->base);
 }
 
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 }
 
 static struct crypto_instance *cryptd_alloc_hash(
-        struct rtattr **tb, struct cryptd_state *state)
+        struct rtattr **tb, struct cryptd_queue *queue)
 {
         struct crypto_instance *inst;
         struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
         if (IS_ERR(alg))
                 return ERR_PTR(PTR_ERR(alg));
 
-        inst = cryptd_alloc_instance(alg, state);
+        inst = cryptd_alloc_instance(alg, queue);
         if (IS_ERR(inst))
                 goto out_put_alg;
 
@@ -502,7 +547,7 @@ out_put_alg:
         return inst;
 }
 
-static struct cryptd_state state;
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 
         switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
         case CRYPTO_ALG_TYPE_BLKCIPHER:
-                return cryptd_alloc_blkcipher(tb, &state);
+                return cryptd_alloc_blkcipher(tb, &queue);
         case CRYPTO_ALG_TYPE_DIGEST:
-                return cryptd_alloc_hash(tb, &state);
+                return cryptd_alloc_hash(tb, &queue);
         }
 
         return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
         .module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-                                       int (*fn)(void *data), const char *name)
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+                                                  u32 type, u32 mask)
 {
-        spin_lock_init(&state->lock);
-        mutex_init(&state->mutex);
-        crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-        state->task = kthread_run(fn, state, name);
-        if (IS_ERR(state->task))
-                return PTR_ERR(state->task);
+        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+        struct crypto_ablkcipher *tfm;
+
+        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+                return ERR_PTR(-EINVAL);
+        tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+        if (IS_ERR(tfm))
+                return ERR_CAST(tfm);
+        if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
+                crypto_free_ablkcipher(tfm);
+                return ERR_PTR(-EINVAL);
+        }
 
-        return 0;
+        return __cryptd_ablkcipher_cast(tfm);
 }
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-        BUG_ON(state->queue.qlen);
-        kthread_stop(state->task);
+        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+        return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-        struct cryptd_state *state = data;
-        int stop;
-
-        current->flags |= PF_NOFREEZE;
-
-        do {
-                struct crypto_async_request *req, *backlog;
-
-                mutex_lock(&state->mutex);
-                __set_current_state(TASK_INTERRUPTIBLE);
-
-                spin_lock_bh(&state->lock);
-                backlog = crypto_get_backlog(&state->queue);
-                req = crypto_dequeue_request(&state->queue);
-                spin_unlock_bh(&state->lock);
-
-                stop = kthread_should_stop();
-
-                if (stop || req) {
-                        __set_current_state(TASK_RUNNING);
-                        if (req) {
-                                if (backlog)
-                                        backlog->complete(backlog,
-                                                          -EINPROGRESS);
-                                req->complete(req, 0);
-                        }
-                }
-
-                mutex_unlock(&state->mutex);
-
-                schedule();
-        } while (!stop);
-
-        return 0;
+        crypto_free_ablkcipher(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 static int __init cryptd_init(void)
 {
         int err;
 
-        err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
         if (err)
                 return err;
 
         err = crypto_register_template(&cryptd_tmpl);
         if (err)
-                kthread_stop(state.task);
+                cryptd_fini_queue(&queue);
 
         return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-        cryptd_stop_thread(&state);
+        cryptd_fini_queue(&queue);
         crypto_unregister_template(&cryptd_tmpl);
 }
 
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
new file mode 100644
index 000000000000..fdcf6248f152
--- /dev/null
+++ b/crypto/crypto_wq.c
@@ -0,0 +1,38 @@
+/*
+ * Workqueue for crypto subsystem
+ *
+ * Copyright (c) 2009 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <crypto/algapi.h>
+#include <crypto/crypto_wq.h>
+
+struct workqueue_struct *kcrypto_wq;
+EXPORT_SYMBOL_GPL(kcrypto_wq);
+
+static int __init crypto_wq_init(void)
+{
+        kcrypto_wq = create_workqueue("crypto");
+        if (unlikely(!kcrypto_wq))
+                return -ENOMEM;
+        return 0;
+}
+
+static void __exit crypto_wq_exit(void)
+{
+        destroy_workqueue(kcrypto_wq);
+}
+
+module_init(crypto_wq_init);
+module_exit(crypto_wq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Workqueue for crypto subsystem");
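The chainiv and cryptd hunks above both queue their deferred work on this shared kcrypto_wq instead of the generic keventd queue. A minimal sketch of a client, assuming a module-local work item and handler (both names hypothetical):

#include <crypto/crypto_wq.h>
#include <linux/workqueue.h>

static void example_complete(struct work_struct *work)
{
        /* ... finish postponed crypto processing here ... */
}

static DECLARE_WORK(example_work, example_complete);

static void example_defer(void)
{
        /* Runs example_complete() on the dedicated "crypto" workqueue. */
        queue_work(kcrypto_wq, &example_work);
}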
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index ecbeaa1f17e1..a90d260528d4 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -4,7 +4,7 @@
  * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
  *
  * Based on Dr Brian Gladman's (GPL'd) work published at
- * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
  * See the original copyright notice below.
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/crypto/internal.h b/crypto/internal.h
index 3c19a27a7563..fc76e1f37fc3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -109,8 +109,10 @@ void crypto_alg_tested(const char *name, int err);
 void crypto_shoot_alg(struct crypto_alg *alg);
 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                       u32 mask);
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-                                     const struct crypto_type *frontend);
+void *crypto_create_tfm(struct crypto_alg *alg,
+                        const struct crypto_type *frontend);
+void *crypto_alloc_tfm(const char *alg_name,
+                       const struct crypto_type *frontend, u32 type, u32 mask);
 
 int crypto_register_instance(struct crypto_template *tmpl,
                              struct crypto_instance *inst);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
new file mode 100644
index 000000000000..ca9a4af91efe
--- /dev/null
+++ b/crypto/pcompress.c
@@ -0,0 +1,97 @@
+/*
+ * Cryptographic API.
+ *
+ * Partial (de)compression operations.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include <crypto/compress.h>
+
+#include "internal.h"
+
+
+static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+        return 0;
+}
+
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
+                                         const struct crypto_type *frontend)
+{
+        return alg->cra_ctxsize;
+}
+
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
+                                 const struct crypto_type *frontend)
+{
+        return 0;
+}
+
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+        __attribute__ ((unused));
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+        seq_printf(m, "type : pcomp\n");
+}
+
+static const struct crypto_type crypto_pcomp_type = {
+        .extsize = crypto_pcomp_extsize,
+        .init = crypto_pcomp_init,
+        .init_tfm = crypto_pcomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+        .show = crypto_pcomp_show,
+#endif
+        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+        .maskset = CRYPTO_ALG_TYPE_MASK,
+        .type = CRYPTO_ALG_TYPE_PCOMPRESS,
+        .tfmsize = offsetof(struct crypto_pcomp, base),
+};
+
+struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
+                                        u32 mask)
+{
+        return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
+
+int crypto_register_pcomp(struct pcomp_alg *alg)
+{
+        struct crypto_alg *base = &alg->base;
+
+        base->cra_type = &crypto_pcomp_type;
+        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+        base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
+
+        return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_pcomp);
+
+int crypto_unregister_pcomp(struct pcomp_alg *alg)
+{
+        return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Partial (de)compression type");
+MODULE_AUTHOR("Sony Corporation");
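The pcomp frontend registered here is a streaming interface: allocate a transform, pass algorithm parameters once via crypto_compress_setup(), then feed data through crypto_compress_update() and flush with crypto_compress_final(), tracking progress via the next_in/avail_in and next_out/avail_out fields of struct comp_request. A condensed one-shot sketch modeled on the testmgr code further down; the function name, buffer handling and the "zlib" choice are illustrative:

#include <crypto/compress.h>
#include <linux/err.h>

/* Hedged sketch: compress src into dst through the pcomp interface. */
static int example_pcomp(void *params, unsigned int paramsize,
                         u8 *src, unsigned int slen,
                         u8 *dst, unsigned int dlen)
{
        struct crypto_pcomp *tfm;
        struct comp_request req;
        int err;

        tfm = crypto_alloc_pcomp("zlib", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_compress_setup(tfm, params, paramsize);
        if (!err)
                err = crypto_compress_init(tfm);
        if (err)
                goto out;

        req.next_in = src;
        req.avail_in = slen;
        req.next_out = dst;
        req.avail_out = dlen;

        /* -EAGAIN with all input consumed just means "more input, please". */
        err = crypto_compress_update(tfm, &req);
        if (!err || (err == -EAGAIN && !req.avail_in))
                err = crypto_compress_final(tfm, &req);
out:
        crypto_free_pcomp(tfm);
        return err ? err : dlen - req.avail_out;
}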
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index caa3542e6ce8..6349d8339d37 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -2,7 +2,7 @@
  * Cryptographic API.
  *
  * SHA-256, as specified in
- * http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
  *
  * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
  *
diff --git a/crypto/shash.c b/crypto/shash.c
index d5a2b619c55f..7a659733f94a 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -18,15 +18,10 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
-static const struct crypto_type crypto_shash_type;
-
-static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
-{
-        return container_of(tfm, struct crypto_shash, base);
-}
-
 #include "internal.h"
 
+static const struct crypto_type crypto_shash_type;
+
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
                                   unsigned int keylen)
 {
@@ -282,8 +277,7 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
         if (!crypto_mod_get(calg))
                 return -EAGAIN;
 
-        shash = __crypto_shash_cast(crypto_create_tfm(
-                calg, &crypto_shash_type));
+        shash = crypto_create_tfm(calg, &crypto_shash_type);
         if (IS_ERR(shash)) {
                 crypto_mod_put(calg);
                 return PTR_ERR(shash);
@@ -391,8 +385,7 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
         if (!crypto_mod_get(calg))
                 return -EAGAIN;
 
-        shash = __crypto_shash_cast(crypto_create_tfm(
-                calg, &crypto_shash_type));
+        shash = crypto_create_tfm(calg, &crypto_shash_type);
         if (IS_ERR(shash)) {
                 crypto_mod_put(calg);
                 return PTR_ERR(shash);
@@ -442,8 +435,6 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
 static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
                                  const struct crypto_type *frontend)
 {
-        if (frontend->type != CRYPTO_ALG_TYPE_SHASH)
-                return -EINVAL;
         return 0;
 }
 
@@ -482,8 +473,7 @@ static const struct crypto_type crypto_shash_type = {
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
                                         u32 mask)
 {
-        return __crypto_shash_cast(
-                crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask));
+        return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_shash);
 
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 28a45a1e6f42..c3c9124209a1 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -53,7 +53,7 @@ static char *check[] = {
         "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
         "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
         "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
-        "lzo", "cts", NULL
+        "lzo", "cts", "zlib", NULL
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
@@ -661,6 +661,10 @@ static void do_test(int m)
                 tcrypt_test("ecb(seed)");
                 break;
 
+        case 44:
+                tcrypt_test("zlib");
+                break;
+
         case 100:
                 tcrypt_test("hmac(md5)");
                 break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a75f11ffb957..b50c3c6b17a2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -72,6 +72,13 @@ struct comp_test_suite {
         } comp, decomp;
 };
 
+struct pcomp_test_suite {
+        struct {
+                struct pcomp_testvec *vecs;
+                unsigned int count;
+        } comp, decomp;
+};
+
 struct hash_test_suite {
         struct hash_testvec *vecs;
         unsigned int count;
@@ -86,6 +93,7 @@ struct alg_test_desc {
                 struct aead_test_suite aead;
                 struct cipher_test_suite cipher;
                 struct comp_test_suite comp;
+                struct pcomp_test_suite pcomp;
                 struct hash_test_suite hash;
         } suite;
 };
@@ -898,6 +906,159 @@ out:
         return ret;
 }
 
+static int test_pcomp(struct crypto_pcomp *tfm,
+                      struct pcomp_testvec *ctemplate,
+                      struct pcomp_testvec *dtemplate, int ctcount,
+                      int dtcount)
+{
+        const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
+        unsigned int i;
+        char result[COMP_BUF_SIZE];
+        int error;
+
+        for (i = 0; i < ctcount; i++) {
+                struct comp_request req;
+
+                error = crypto_compress_setup(tfm, ctemplate[i].params,
+                                              ctemplate[i].paramsize);
+                if (error) {
+                        pr_err("alg: pcomp: compression setup failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                error = crypto_compress_init(tfm);
+                if (error) {
+                        pr_err("alg: pcomp: compression init failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                memset(result, 0, sizeof(result));
+
+                req.next_in = ctemplate[i].input;
+                req.avail_in = ctemplate[i].inlen / 2;
+                req.next_out = result;
+                req.avail_out = ctemplate[i].outlen / 2;
+
+                error = crypto_compress_update(tfm, &req);
+                if (error && (error != -EAGAIN || req.avail_in)) {
+                        pr_err("alg: pcomp: compression update failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                /* Add remaining input data */
+                req.avail_in += (ctemplate[i].inlen + 1) / 2;
+
+                error = crypto_compress_update(tfm, &req);
+                if (error && (error != -EAGAIN || req.avail_in)) {
+                        pr_err("alg: pcomp: compression update failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                /* Provide remaining output space */
+                req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
+
+                error = crypto_compress_final(tfm, &req);
+                if (error) {
+                        pr_err("alg: pcomp: compression final failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
+                        pr_err("alg: comp: Compression test %d failed for %s: "
+                               "output len = %d (expected %d)\n", i + 1, algo,
+                               COMP_BUF_SIZE - req.avail_out,
+                               ctemplate[i].outlen);
+                        return -EINVAL;
+                }
+
+                if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
+                        pr_err("alg: pcomp: Compression test %d failed for "
+                               "%s\n", i + 1, algo);
+                        hexdump(result, ctemplate[i].outlen);
+                        return -EINVAL;
+                }
+        }
+
+        for (i = 0; i < dtcount; i++) {
+                struct comp_request req;
+
+                error = crypto_decompress_setup(tfm, dtemplate[i].params,
+                                                dtemplate[i].paramsize);
+                if (error) {
+                        pr_err("alg: pcomp: decompression setup failed on "
+                               "test %d for %s: error=%d\n", i + 1, algo,
+                               error);
+                        return error;
+                }
+
+                error = crypto_decompress_init(tfm);
+                if (error) {
+                        pr_err("alg: pcomp: decompression init failed on test "
+                               "%d for %s: error=%d\n", i + 1, algo, error);
+                        return error;
+                }
+
+                memset(result, 0, sizeof(result));
+
+                req.next_in = dtemplate[i].input;
+                req.avail_in = dtemplate[i].inlen / 2;
+                req.next_out = result;
+                req.avail_out = dtemplate[i].outlen / 2;
+
+                error = crypto_decompress_update(tfm, &req);
+                if (error && (error != -EAGAIN || req.avail_in)) {
+                        pr_err("alg: pcomp: decompression update failed on "
+                               "test %d for %s: error=%d\n", i + 1, algo,
+                               error);
+                        return error;
+                }
+
+                /* Add remaining input data */
+                req.avail_in += (dtemplate[i].inlen + 1) / 2;
+
+                error = crypto_decompress_update(tfm, &req);
+                if (error && (error != -EAGAIN || req.avail_in)) {
+                        pr_err("alg: pcomp: decompression update failed on "
+                               "test %d for %s: error=%d\n", i + 1, algo,
+                               error);
+                        return error;
+                }
+
+                /* Provide remaining output space */
+                req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
+
+                error = crypto_decompress_final(tfm, &req);
+                if (error && (error != -EAGAIN || req.avail_in)) {
+                        pr_err("alg: pcomp: decompression final failed on "
+                               "test %d for %s: error=%d\n", i + 1, algo,
+                               error);
+                        return error;
+                }
+
+                if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
+                        pr_err("alg: comp: Decompression test %d failed for "
+                               "%s: output len = %d (expected %d)\n", i + 1,
+                               algo, COMP_BUF_SIZE - req.avail_out,
+                               dtemplate[i].outlen);
+                        return -EINVAL;
+                }
+
+                if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
+                        pr_err("alg: pcomp: Decompression test %d failed for "
+                               "%s\n", i + 1, algo);
+                        hexdump(result, dtemplate[i].outlen);
+                        return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
 static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
                          u32 type, u32 mask)
 {
@@ -1007,6 +1168,28 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
         return err;
 }
 
+static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
+                          u32 type, u32 mask)
+{
+        struct crypto_pcomp *tfm;
+        int err;
+
+        tfm = crypto_alloc_pcomp(driver, type, mask);
+        if (IS_ERR(tfm)) {
+                pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
+                       driver, PTR_ERR(tfm));
+                return PTR_ERR(tfm);
+        }
+
+        err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
+                         desc->suite.pcomp.decomp.vecs,
+                         desc->suite.pcomp.comp.count,
+                         desc->suite.pcomp.decomp.count);
+
+        crypto_free_pcomp(tfm);
+        return err;
+}
+
 static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
                          u32 type, u32 mask)
 {
@@ -1835,6 +2018,21 @@ static const struct alg_test_desc alg_test_descs[] = {
                                 }
                         }
                 }
+        }, {
+                .alg = "zlib",
+                .test = alg_test_pcomp,
+                .suite = {
+                        .pcomp = {
+                                .comp = {
+                                        .vecs = zlib_comp_tv_template,
+                                        .count = ZLIB_COMP_TEST_VECTORS
+                                },
+                                .decomp = {
+                                        .vecs = zlib_decomp_tv_template,
+                                        .count = ZLIB_DECOMP_TEST_VECTORS
+                                }
+                        }
+                }
         }
 };
 
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 132953e144d3..526f00a9c72f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -15,6 +15,11 @@
 #ifndef _CRYPTO_TESTMGR_H
 #define _CRYPTO_TESTMGR_H
 
+#include <linux/netlink.h>
+#include <linux/zlib.h>
+
+#include <crypto/compress.h>
+
 #define MAX_DIGEST_SIZE 64
 #define MAX_TAP 8
 
@@ -8347,10 +8352,19 @@ struct comp_testvec {
         char output[COMP_BUF_SIZE];
 };
 
+struct pcomp_testvec {
+        void *params;
+        unsigned int paramsize;
+        int inlen, outlen;
+        char input[COMP_BUF_SIZE];
+        char output[COMP_BUF_SIZE];
+};
+
 /*
  * Deflate test vectors (null-terminated strings).
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
  */
+
 #define DEFLATE_COMP_TEST_VECTORS 2
 #define DEFLATE_DECOMP_TEST_VECTORS 2
 
@@ -8426,6 +8440,139 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
         },
 };
 
+#define ZLIB_COMP_TEST_VECTORS 2
+#define ZLIB_DECOMP_TEST_VECTORS 2
+
+static const struct {
+        struct nlattr nla;
+        int val;
+} deflate_comp_params[] = {
+        {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_COMP_LEVEL,
+                },
+                .val = Z_DEFAULT_COMPRESSION,
+        }, {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_COMP_METHOD,
+                },
+                .val = Z_DEFLATED,
+        }, {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_COMP_WINDOWBITS,
+                },
+                .val = -11,
+        }, {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_COMP_MEMLEVEL,
+                },
+                .val = MAX_MEM_LEVEL,
+        }, {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_COMP_STRATEGY,
+                },
+                .val = Z_DEFAULT_STRATEGY,
+        }
+};
+
+static const struct {
+        struct nlattr nla;
+        int val;
+} deflate_decomp_params[] = {
+        {
+                .nla = {
+                        .nla_len = NLA_HDRLEN + sizeof(int),
+                        .nla_type = ZLIB_DECOMP_WINDOWBITS,
+                },
+                .val = -11,
+        }
+};
+
+static struct pcomp_testvec zlib_comp_tv_template[] = {
+        {
+                .params = &deflate_comp_params,
+                .paramsize = sizeof(deflate_comp_params),
+                .inlen = 70,
+                .outlen = 38,
+                .input = "Join us now and share the software "
+                         "Join us now and share the software ",
+                .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+                          "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+                          "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+                          "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+                          "\x71\xbc\x08\x2b\x01\x00",
+        }, {
+                .params = &deflate_comp_params,
+                .paramsize = sizeof(deflate_comp_params),
+                .inlen = 191,
+                .outlen = 122,
+                .input = "This document describes a compression method based on the DEFLATE"
+                         "compression algorithm. This document defines the application of "
+                         "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+                .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+                          "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+                          "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+                          "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+                          "\x68\x12\x51\xae\x76\x67\xd6\x27"
+                          "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+                          "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+                          "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+                          "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+                          "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+                          "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+                          "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+                          "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+                          "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+                          "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+                          "\xfa\x02",
+        },
+};
+
+static struct pcomp_testvec zlib_decomp_tv_template[] = {
+        {
+                .params = &deflate_decomp_params,
+                .paramsize = sizeof(deflate_decomp_params),
+                .inlen = 122,
+                .outlen = 191,
+                .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
+                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+                         "\xfa\x02",
+                .output = "This document describes a compression method based on the DEFLATE"
+                          "compression algorithm. This document defines the application of "
+                          "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+        }, {
+                .params = &deflate_decomp_params,
+                .paramsize = sizeof(deflate_decomp_params),
+                .inlen = 38,
+                .outlen = 70,
+                .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+                         "\x71\xbc\x08\x2b\x01\x00",
+                .output = "Join us now and share the software "
+                          "Join us now and share the software ",
+        },
+};
+
 /*
  * LZO test vectors (null-terminated strings).
  */
diff --git a/crypto/zlib.c b/crypto/zlib.c
new file mode 100644
index 000000000000..33609bab614e
--- /dev/null
+++ b/crypto/zlib.c
@@ -0,0 +1,378 @@
1/*
2 * Cryptographic API.
3 *
4 * Zlib algorithm
5 *
6 * Copyright 2008 Sony Corporation
7 *
8 * Based on deflate.c, which is
9 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * FIXME: deflate transforms will require up to a total of about 436k of kernel
17 * memory on i386 (390k for compression, the rest for decompression), as the
18 * current zlib kernel code uses a worst case pre-allocation system by default.
19 * This needs to be fixed so that the amount of memory required is properly
20 * related to the winbits and memlevel parameters.
21 */
22
23#define pr_fmt(fmt) "%s: " fmt, __func__
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/zlib.h>
28#include <linux/vmalloc.h>
29#include <linux/interrupt.h>
30#include <linux/mm.h>
31#include <linux/net.h>
32#include <linux/slab.h>
33
34#include <crypto/internal/compress.h>
35
36#include <net/netlink.h>
37
38
39struct zlib_ctx {
40 struct z_stream_s comp_stream;
41 struct z_stream_s decomp_stream;
42 int decomp_windowBits;
43};
44
45
46static void zlib_comp_exit(struct zlib_ctx *ctx)
47{
48 struct z_stream_s *stream = &ctx->comp_stream;
49
50 if (stream->workspace) {
51 zlib_deflateEnd(stream);
52 vfree(stream->workspace);
53 stream->workspace = NULL;
54 }
55}
56
57static void zlib_decomp_exit(struct zlib_ctx *ctx)
58{
59 struct z_stream_s *stream = &ctx->decomp_stream;
60
61 if (stream->workspace) {
62 zlib_inflateEnd(stream);
63 kfree(stream->workspace);
64 stream->workspace = NULL;
65 }
66}
67
68static int zlib_init(struct crypto_tfm *tfm)
69{
70 return 0;
71}
72
73static void zlib_exit(struct crypto_tfm *tfm)
74{
75 struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
76
77 zlib_comp_exit(ctx);
78 zlib_decomp_exit(ctx);
79}


static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
			       unsigned int len)
{
	struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &ctx->comp_stream;
	struct nlattr *tb[ZLIB_COMP_MAX + 1];
	size_t workspacesize;
	int ret;

	ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
	if (ret)
		return ret;

	zlib_comp_exit(ctx);

	workspacesize = zlib_deflate_workspacesize();
	stream->workspace = vmalloc(workspacesize);
	if (!stream->workspace)
		return -ENOMEM;

	memset(stream->workspace, 0, workspacesize);
	ret = zlib_deflateInit2(stream,
				tb[ZLIB_COMP_LEVEL]
				? nla_get_u32(tb[ZLIB_COMP_LEVEL])
				: Z_DEFAULT_COMPRESSION,
				tb[ZLIB_COMP_METHOD]
				? nla_get_u32(tb[ZLIB_COMP_METHOD])
				: Z_DEFLATED,
				tb[ZLIB_COMP_WINDOWBITS]
				? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
				: MAX_WBITS,
				tb[ZLIB_COMP_MEMLEVEL]
				? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
				: DEF_MEM_LEVEL,
				tb[ZLIB_COMP_STRATEGY]
				? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
				: Z_DEFAULT_STRATEGY);
	if (ret != Z_OK) {
		vfree(stream->workspace);
		stream->workspace = NULL;
		return -EINVAL;
	}

	return 0;
}
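
/*
 * Illustrative caller-side sketch (not part of the original module):
 * the params blob parsed above is a flat run of netlink attributes, so
 * a fixed compression level could be requested as below, assuming a
 * crypto_pcomp handle "tfm" already obtained via crypto_alloc_pcomp().
 */
#if 0
	struct {
		struct nlattr nla;
		u32 val;
	} zlib_params = {
		.nla = {
			.nla_len	= nla_attr_size(sizeof(u32)),
			.nla_type	= ZLIB_COMP_LEVEL,
		},
		.val = Z_BEST_COMPRESSION,
	};

	ret = crypto_compress_setup(tfm, &zlib_params, sizeof(zlib_params));
#endif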

static int zlib_compress_init(struct crypto_pcomp *tfm)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->comp_stream;

	ret = zlib_deflateReset(stream);
	if (ret != Z_OK)
		return -EINVAL;

	return 0;
}

static int zlib_compress_update(struct crypto_pcomp *tfm,
				struct comp_request *req)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->comp_stream;

	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
	stream->next_in = req->next_in;
	stream->avail_in = req->avail_in;
	stream->next_out = req->next_out;
	stream->avail_out = req->avail_out;

	ret = zlib_deflate(stream, Z_NO_FLUSH);
	switch (ret) {
	case Z_OK:
		break;

	case Z_BUF_ERROR:
		pr_debug("zlib_deflate could not make progress\n");
		return -EAGAIN;

	default:
		pr_debug("zlib_deflate failed %d\n", ret);
		return -EINVAL;
	}

	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
		 req->avail_in - stream->avail_in,
		 req->avail_out - stream->avail_out);
	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
	return 0;
}

static int zlib_compress_final(struct crypto_pcomp *tfm,
			       struct comp_request *req)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->comp_stream;

	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
	stream->next_in = req->next_in;
	stream->avail_in = req->avail_in;
	stream->next_out = req->next_out;
	stream->avail_out = req->avail_out;

	ret = zlib_deflate(stream, Z_FINISH);
	if (ret != Z_STREAM_END) {
		pr_debug("zlib_deflate failed %d\n", ret);
		return -EINVAL;
	}

	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
		 req->avail_in - stream->avail_in,
		 req->avail_out - stream->avail_out);
	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
	return 0;
}
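
/*
 * Illustrative calling sequence (a sketch, assuming the pcomp wrappers
 * from this series, caller-provided src/slen and dst/dlen buffers, and
 * an output buffer large enough for the whole stream): one setup per
 * parameter set, then init/update/final per stream; -EAGAIN from an
 * update means "no progress, retry with more output space".
 */
#if 0
	struct comp_request req = {
		.next_in	= src,
		.avail_in	= slen,
		.next_out	= dst,
		.avail_out	= dlen,
	};

	ret = crypto_compress_init(tfm);
	if (!ret)
		ret = crypto_compress_update(tfm, &req);
	if (!ret)
		ret = crypto_compress_final(tfm, &req);
	/* on success, dlen - req.avail_out output bytes were produced */
#endif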


static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
				 unsigned int len)
{
	struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &ctx->decomp_stream;
	struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
	int ret;

	ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
	if (ret)
		return ret;

	zlib_decomp_exit(ctx);

	ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
				 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
				 : DEF_WBITS;

	stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (!stream->workspace)
		return -ENOMEM;

	ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
	if (ret != Z_OK) {
		kfree(stream->workspace);
		stream->workspace = NULL;
		return -EINVAL;
	}

	return 0;
}
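
/*
 * Note: a negative ZLIB_DECOMP_WINDOWBITS value selects zlib's raw
 * deflate mode (no zlib header/trailer), e.g. -11 for IPComp/deflate-
 * style streams such as the test vectors above; zlib_decompress_final()
 * below carries a workaround for that mode.
 */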

static int zlib_decompress_init(struct crypto_pcomp *tfm)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->decomp_stream;

	ret = zlib_inflateReset(stream);
	if (ret != Z_OK)
		return -EINVAL;

	return 0;
}

static int zlib_decompress_update(struct crypto_pcomp *tfm,
				  struct comp_request *req)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->decomp_stream;

	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
	stream->next_in = req->next_in;
	stream->avail_in = req->avail_in;
	stream->next_out = req->next_out;
	stream->avail_out = req->avail_out;

	ret = zlib_inflate(stream, Z_SYNC_FLUSH);
	switch (ret) {
	case Z_OK:
	case Z_STREAM_END:
		break;

	case Z_BUF_ERROR:
		pr_debug("zlib_inflate could not make progress\n");
		return -EAGAIN;

	default:
		pr_debug("zlib_inflate failed %d\n", ret);
		return -EINVAL;
	}

	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
		 req->avail_in - stream->avail_in,
		 req->avail_out - stream->avail_out);
	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
	return 0;
}

static int zlib_decompress_final(struct crypto_pcomp *tfm,
				 struct comp_request *req)
{
	int ret;
	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
	struct z_stream_s *stream = &dctx->decomp_stream;

	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
	stream->next_in = req->next_in;
	stream->avail_in = req->avail_in;
	stream->next_out = req->next_out;
	stream->avail_out = req->avail_out;

	if (dctx->decomp_windowBits < 0) {
		ret = zlib_inflate(stream, Z_SYNC_FLUSH);
		/*
		 * Work around a bug in zlib, which sometimes wants to taste an
		 * extra byte when being used in the (undocumented) raw deflate
		 * mode. (From USAGI).
		 */
		if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
			const void *saved_next_in = stream->next_in;
			u8 zerostuff = 0;

			stream->next_in = &zerostuff;
			stream->avail_in = 1;
			ret = zlib_inflate(stream, Z_FINISH);
			stream->next_in = saved_next_in;
			stream->avail_in = 0;
		}
	} else {
		ret = zlib_inflate(stream, Z_FINISH);
	}
	if (ret != Z_STREAM_END) {
		pr_debug("zlib_inflate failed %d\n", ret);
		return -EINVAL;
	}

	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
		 req->avail_in - stream->avail_in,
		 req->avail_out - stream->avail_out);
	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
	return 0;
}


static struct pcomp_alg zlib_alg = {
	.compress_setup		= zlib_compress_setup,
	.compress_init		= zlib_compress_init,
	.compress_update	= zlib_compress_update,
	.compress_final		= zlib_compress_final,
	.decompress_setup	= zlib_decompress_setup,
	.decompress_init	= zlib_decompress_init,
	.decompress_update	= zlib_decompress_update,
	.decompress_final	= zlib_decompress_final,

	.base			= {
		.cra_name	= "zlib",
		.cra_flags	= CRYPTO_ALG_TYPE_PCOMPRESS,
		.cra_ctxsize	= sizeof(struct zlib_ctx),
		.cra_module	= THIS_MODULE,
		.cra_init	= zlib_init,
		.cra_exit	= zlib_exit,
	}
};

static int __init zlib_mod_init(void)
{
	return crypto_register_pcomp(&zlib_alg);
}

static void __exit zlib_mod_fini(void)
{
	crypto_unregister_pcomp(&zlib_alg);
}

module_init(zlib_mod_init);
module_exit(zlib_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zlib Compression Algorithm");
MODULE_AUTHOR("Sony Corporation");
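
/*
 * Usage sketch (illustrative, assuming the pcomp allocation helpers
 * added by this series): a user obtains the registered transform by
 * name and releases it when done.
 */
#if 0
	struct crypto_pcomp *tfm = crypto_alloc_pcomp("zlib", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* setup/init/update/final as sketched above */

	crypto_free_pcomp(tfm);
#endif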