author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /crypto
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig | 61
-rw-r--r-- | crypto/Makefile | 19
-rw-r--r-- | crypto/ablkcipher.c | 3
-rw-r--r-- | crypto/af_alg.c | 483
-rw-r--r-- | crypto/algif_hash.c | 319
-rw-r--r-- | crypto/algif_skcipher.c | 632
-rw-r--r-- | crypto/ansi_cprng.c | 2
-rw-r--r-- | crypto/async_tx/Kconfig | 13
-rw-r--r-- | crypto/async_tx/async_memcpy.c | 2
-rw-r--r-- | crypto/async_tx/async_xor.c | 2
-rw-r--r-- | crypto/authenc.c | 22
-rw-r--r-- | crypto/authencesn.c | 835
-rw-r--r-- | crypto/blkcipher.c | 2
-rw-r--r-- | crypto/cast5.c | 74
-rw-r--r-- | crypto/cryptd.c | 206
-rw-r--r-- | crypto/crypto_wq.c | 3
-rw-r--r-- | crypto/deflate.c | 11
-rw-r--r-- | crypto/des_generic.c | 130
-rw-r--r-- | crypto/eseqiv.c | 18
-rw-r--r-- | crypto/gcm.c | 19
-rw-r--r-- | crypto/gf128mul.c | 2
-rw-r--r-- | crypto/pcrypt.c | 4
-rw-r--r-- | crypto/rmd128.c | 3
-rw-r--r-- | crypto/rmd160.c | 3
-rw-r--r-- | crypto/rmd256.c | 3
-rw-r--r-- | crypto/rmd320.c | 3
-rw-r--r-- | crypto/shash.c | 8
-rw-r--r-- | crypto/tcrypt.c | 18
-rw-r--r-- | crypto/testmgr.c | 42
-rw-r--r-- | crypto/testmgr.h | 451
-rw-r--r-- | crypto/vmac.c | 2
-rw-r--r-- | crypto/xts.c | 2
-rw-r--r-- | crypto/zlib.c | 28
33 files changed, 3174 insertions, 251 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e573077f1672..87b22ca9c223 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,13 +23,12 @@ comment "Crypto core or helper" | |||
23 | 23 | ||
24 | config CRYPTO_FIPS | 24 | config CRYPTO_FIPS |
25 | bool "FIPS 200 compliance" | 25 | bool "FIPS 200 compliance" |
26 | depends on CRYPTO_ANSI_CPRNG | 26 | depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS |
27 | help | 27 | help |
28 | This options enables the fips boot option which is | 28 | This options enables the fips boot option which is |
29 | required if you want to system to operate in a FIPS 200 | 29 | required if you want to system to operate in a FIPS 200 |
30 | certification. You should say no unless you know what | 30 | certification. You should say no unless you know what |
31 | this is. Note that CRYPTO_ANSI_CPRNG is required if this | 31 | this is. |
32 | option is selected | ||
33 | 32 | ||
34 | config CRYPTO_ALGAPI | 33 | config CRYPTO_ALGAPI |
35 | tristate | 34 | tristate |
@@ -111,7 +110,6 @@ config CRYPTO_MANAGER_DISABLE_TESTS | |||
111 | 110 | ||
112 | config CRYPTO_GF128MUL | 111 | config CRYPTO_GF128MUL |
113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" | 112 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" |
114 | depends on EXPERIMENTAL | ||
115 | help | 113 | help |
116 | Efficient table driven implementation of multiplications in the | 114 | Efficient table driven implementation of multiplications in the |
117 | field GF(2^128). This is needed by some cypher modes. This | 115 | field GF(2^128). This is needed by some cypher modes. This |
@@ -266,11 +264,6 @@ config CRYPTO_XTS | |||
266 | key size 256, 384 or 512 bits. This implementation currently | 264 | key size 256, 384 or 512 bits. This implementation currently |
267 | can't handle a sectorsize which is not a multiple of 16 bytes. | 265 | can't handle a sectorsize which is not a multiple of 16 bytes. |
268 | 266 | ||
269 | config CRYPTO_FPU | ||
270 | tristate | ||
271 | select CRYPTO_BLKCIPHER | ||
272 | select CRYPTO_MANAGER | ||
273 | |||
274 | comment "Hash modes" | 267 | comment "Hash modes" |
275 | 268 | ||
276 | config CRYPTO_HMAC | 269 | config CRYPTO_HMAC |
@@ -365,7 +358,7 @@ config CRYPTO_RMD128 | |||
365 | RIPEMD-160 should be used. | 358 | RIPEMD-160 should be used. |
366 | 359 | ||
367 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 360 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
368 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 361 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
369 | 362 | ||
370 | config CRYPTO_RMD160 | 363 | config CRYPTO_RMD160 |
371 | tristate "RIPEMD-160 digest algorithm" | 364 | tristate "RIPEMD-160 digest algorithm" |
@@ -382,7 +375,7 @@ config CRYPTO_RMD160 | |||
382 | against RIPEMD-160. | 375 | against RIPEMD-160. |
383 | 376 | ||
384 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 377 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
385 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 378 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
386 | 379 | ||
387 | config CRYPTO_RMD256 | 380 | config CRYPTO_RMD256 |
388 | tristate "RIPEMD-256 digest algorithm" | 381 | tristate "RIPEMD-256 digest algorithm" |
@@ -394,7 +387,7 @@ config CRYPTO_RMD256 | |||
394 | (than RIPEMD-128). | 387 | (than RIPEMD-128). |
395 | 388 | ||
396 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 389 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
397 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 390 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
398 | 391 | ||
399 | config CRYPTO_RMD320 | 392 | config CRYPTO_RMD320 |
400 | tristate "RIPEMD-320 digest algorithm" | 393 | tristate "RIPEMD-320 digest algorithm" |
@@ -406,7 +399,7 @@ config CRYPTO_RMD320 | |||
406 | (than RIPEMD-160). | 399 | (than RIPEMD-160). |
407 | 400 | ||
408 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 401 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
409 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 402 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
410 | 403 | ||
411 | config CRYPTO_SHA1 | 404 | config CRYPTO_SHA1 |
412 | tristate "SHA1 digest algorithm" | 405 | tristate "SHA1 digest algorithm" |
@@ -461,7 +454,7 @@ config CRYPTO_WP512 | |||
461 | Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard | 454 | Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard |
462 | 455 | ||
463 | See also: | 456 | See also: |
464 | <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html> | 457 | <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html> |
465 | 458 | ||
466 | config CRYPTO_GHASH_CLMUL_NI_INTEL | 459 | config CRYPTO_GHASH_CLMUL_NI_INTEL |
467 | tristate "GHASH digest algorithm (CLMUL-NI accelerated)" | 460 | tristate "GHASH digest algorithm (CLMUL-NI accelerated)" |
@@ -540,11 +533,11 @@ config CRYPTO_AES_X86_64 | |||
540 | 533 | ||
541 | config CRYPTO_AES_NI_INTEL | 534 | config CRYPTO_AES_NI_INTEL |
542 | tristate "AES cipher algorithms (AES-NI)" | 535 | tristate "AES cipher algorithms (AES-NI)" |
543 | depends on (X86 || UML_X86) && 64BIT | 536 | depends on (X86 || UML_X86) |
544 | select CRYPTO_AES_X86_64 | 537 | select CRYPTO_AES_X86_64 if 64BIT |
538 | select CRYPTO_AES_586 if !64BIT | ||
545 | select CRYPTO_CRYPTD | 539 | select CRYPTO_CRYPTD |
546 | select CRYPTO_ALGAPI | 540 | select CRYPTO_ALGAPI |
547 | select CRYPTO_FPU | ||
548 | help | 541 | help |
549 | Use Intel AES-NI instructions for AES algorithm. | 542 | Use Intel AES-NI instructions for AES algorithm. |
550 | 543 | ||
@@ -564,9 +557,10 @@ config CRYPTO_AES_NI_INTEL | |||
564 | 557 | ||
565 | See <http://csrc.nist.gov/encryption/aes/> for more information. | 558 | See <http://csrc.nist.gov/encryption/aes/> for more information. |
566 | 559 | ||
567 | In addition to AES cipher algorithm support, the | 560 | In addition to AES cipher algorithm support, the acceleration |
568 | acceleration for some popular block cipher mode is supported | 561 | for some popular block cipher mode is supported too, including |
569 | too, including ECB, CBC, CTR, LRW, PCBC, XTS. | 562 | ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional |
563 | acceleration for CTR. | ||
570 | 564 | ||
571 | config CRYPTO_ANUBIS | 565 | config CRYPTO_ANUBIS |
572 | tristate "Anubis cipher algorithm" | 566 | tristate "Anubis cipher algorithm" |
@@ -579,8 +573,8 @@ config CRYPTO_ANUBIS | |||
579 | in the NESSIE competition. | 573 | in the NESSIE competition. |
580 | 574 | ||
581 | See also: | 575 | See also: |
582 | <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/> | 576 | <https://www.cosic.esat.kuleuven.be/nessie/reports/> |
583 | <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html> | 577 | <http://www.larc.usp.br/~pbarreto/AnubisPage.html> |
584 | 578 | ||
585 | config CRYPTO_ARC4 | 579 | config CRYPTO_ARC4 |
586 | tristate "ARC4 cipher algorithm" | 580 | tristate "ARC4 cipher algorithm" |
@@ -659,7 +653,7 @@ config CRYPTO_KHAZAD | |||
659 | on 32-bit processors. Khazad uses an 128 bit key size. | 653 | on 32-bit processors. Khazad uses an 128 bit key size. |
660 | 654 | ||
661 | See also: | 655 | See also: |
662 | <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html> | 656 | <http://www.larc.usp.br/~pbarreto/KhazadPage.html> |
663 | 657 | ||
664 | config CRYPTO_SALSA20 | 658 | config CRYPTO_SALSA20 |
665 | tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" | 659 | tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" |
@@ -842,6 +836,27 @@ config CRYPTO_ANSI_CPRNG | |||
842 | ANSI X9.31 A.2.4. Note that this option must be enabled if | 836 | ANSI X9.31 A.2.4. Note that this option must be enabled if |
843 | CRYPTO_FIPS is selected | 837 | CRYPTO_FIPS is selected |
844 | 838 | ||
839 | config CRYPTO_USER_API | ||
840 | tristate | ||
841 | |||
842 | config CRYPTO_USER_API_HASH | ||
843 | tristate "User-space interface for hash algorithms" | ||
844 | depends on NET | ||
845 | select CRYPTO_HASH | ||
846 | select CRYPTO_USER_API | ||
847 | help | ||
848 | This option enables the user-spaces interface for hash | ||
849 | algorithms. | ||
850 | |||
851 | config CRYPTO_USER_API_SKCIPHER | ||
852 | tristate "User-space interface for symmetric key cipher algorithms" | ||
853 | depends on NET | ||
854 | select CRYPTO_BLKCIPHER | ||
855 | select CRYPTO_USER_API | ||
856 | help | ||
857 | This option enables the user-spaces interface for symmetric | ||
858 | key cipher algorithms. | ||
859 | |||
845 | source "drivers/crypto/Kconfig" | 860 | source "drivers/crypto/Kconfig" |
846 | 861 | ||
847 | endif # if CRYPTO | 862 | endif # if CRYPTO |
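
The two CRYPTO_USER_API_* options added in this hunk expose kernel transforms to user space through the AF_ALG socket family implemented later in this series. As a rough sketch (not part of the patch, and assuming the AF_ALG constant of 38 plus <linux/if_alg.h> from the matching uapi headers), binding to an algorithm from user space looks like this:

```c
/*
 * Hypothetical user-space sketch (not part of this patch): reaching a
 * transform once CRYPTO_USER_API_HASH or CRYPTO_USER_API_SKCIPHER is
 * enabled.  AF_ALG = 38 is an assumption about the socket.h change
 * that accompanies this series.
 */
#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif

/* e.g. alg_open("hash", "sha1") or alg_open("skcipher", "cbc(aes)") */
static int alg_open(const char *type, const char *name)
{
	struct sockaddr_alg sa = { .salg_family = AF_ALG };
	int fd;

	strncpy((char *)sa.salg_type, type, sizeof(sa.salg_type) - 1);
	strncpy((char *)sa.salg_name, name, sizeof(sa.salg_name) - 1);

	fd = socket(AF_ALG, SOCK_SEQPACKET, 0);	/* alg_create()        */
	if (fd < 0)
		return -1;

	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);			/* alg_bind() rejected */
		return -1;
	}

	return fd;	/* bound "transform" socket; accept() yields op fds */
}
```

The returned descriptor corresponds to the parent socket handled by alg_bind() in af_alg.c below; operation sockets are then obtained from it with accept().
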
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de61f93..ce5a813d3639 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,32 +3,32 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_CRYPTO) += crypto.o | 5 | obj-$(CONFIG_CRYPTO) += crypto.o |
6 | crypto-objs := api.o cipher.o compress.o | 6 | crypto-y := api.o cipher.o compress.o |
7 | 7 | ||
8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o |
9 | 9 | ||
10 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o | 10 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o |
11 | 11 | ||
12 | crypto_algapi-$(CONFIG_PROC_FS) += proc.o | 12 | crypto_algapi-$(CONFIG_PROC_FS) += proc.o |
13 | crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y) | 13 | crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y) |
14 | obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o | 14 | obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o |
15 | 15 | ||
16 | obj-$(CONFIG_CRYPTO_AEAD2) += aead.o | 16 | obj-$(CONFIG_CRYPTO_AEAD2) += aead.o |
17 | 17 | ||
18 | crypto_blkcipher-objs := ablkcipher.o | 18 | crypto_blkcipher-y := ablkcipher.o |
19 | crypto_blkcipher-objs += blkcipher.o | 19 | crypto_blkcipher-y += blkcipher.o |
20 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o | 20 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o |
21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o | 21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o |
22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o | 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o |
23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o | 23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o |
24 | 24 | ||
25 | crypto_hash-objs += ahash.o | 25 | crypto_hash-y += ahash.o |
26 | crypto_hash-objs += shash.o | 26 | crypto_hash-y += shash.o |
27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
28 | 28 | ||
29 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o | 29 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o |
30 | 30 | ||
31 | cryptomgr-objs := algboss.o testmgr.o | 31 | cryptomgr-y := algboss.o testmgr.o |
32 | 32 | ||
33 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o | 33 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o |
34 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o | 34 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o |
@@ -78,13 +78,16 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o | |||
78 | obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o | 78 | obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o |
79 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o | 79 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o |
80 | obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o | 80 | obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o |
81 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o | 81 | obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o |
82 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o | 82 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o |
83 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o | 83 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o |
84 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | 84 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o |
85 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 85 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
86 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o | 86 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o |
87 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | 87 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o |
88 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o | ||
89 | obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o | ||
90 | obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o | ||
88 | 91 | ||
89 | # | 92 | # |
90 | # generic algorithms and the async_tx api | 93 | # generic algorithms and the async_tx api |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a854df2a5a4b..fdc67d38660b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -141,8 +141,7 @@ err: | |||
141 | 141 | ||
142 | if (walk->iv != req->info) | 142 | if (walk->iv != req->info) |
143 | memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); | 143 | memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); |
144 | if (walk->iv_buffer) | 144 | kfree(walk->iv_buffer); |
145 | kfree(walk->iv_buffer); | ||
146 | 145 | ||
147 | return err; | 146 | return err; |
148 | } | 147 | } |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
new file mode 100644
index 000000000000..940d70cb5c25
--- /dev/null
+++ b/crypto/af_alg.c
@@ -0,0 +1,483 @@ | |||
1 | /* | ||
2 | * af_alg: User-space algorithm interface | ||
3 | * | ||
4 | * This file provides the user-space API for algorithms. | ||
5 | * | ||
6 | * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <asm/atomic.h> | ||
16 | #include <crypto/if_alg.h> | ||
17 | #include <linux/crypto.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/net.h> | ||
23 | #include <linux/rwsem.h> | ||
24 | |||
25 | struct alg_type_list { | ||
26 | const struct af_alg_type *type; | ||
27 | struct list_head list; | ||
28 | }; | ||
29 | |||
30 | static atomic_long_t alg_memory_allocated; | ||
31 | |||
32 | static struct proto alg_proto = { | ||
33 | .name = "ALG", | ||
34 | .owner = THIS_MODULE, | ||
35 | .memory_allocated = &alg_memory_allocated, | ||
36 | .obj_size = sizeof(struct alg_sock), | ||
37 | }; | ||
38 | |||
39 | static LIST_HEAD(alg_types); | ||
40 | static DECLARE_RWSEM(alg_types_sem); | ||
41 | |||
42 | static const struct af_alg_type *alg_get_type(const char *name) | ||
43 | { | ||
44 | const struct af_alg_type *type = ERR_PTR(-ENOENT); | ||
45 | struct alg_type_list *node; | ||
46 | |||
47 | down_read(&alg_types_sem); | ||
48 | list_for_each_entry(node, &alg_types, list) { | ||
49 | if (strcmp(node->type->name, name)) | ||
50 | continue; | ||
51 | |||
52 | if (try_module_get(node->type->owner)) | ||
53 | type = node->type; | ||
54 | break; | ||
55 | } | ||
56 | up_read(&alg_types_sem); | ||
57 | |||
58 | return type; | ||
59 | } | ||
60 | |||
61 | int af_alg_register_type(const struct af_alg_type *type) | ||
62 | { | ||
63 | struct alg_type_list *node; | ||
64 | int err = -EEXIST; | ||
65 | |||
66 | down_write(&alg_types_sem); | ||
67 | list_for_each_entry(node, &alg_types, list) { | ||
68 | if (!strcmp(node->type->name, type->name)) | ||
69 | goto unlock; | ||
70 | } | ||
71 | |||
72 | node = kmalloc(sizeof(*node), GFP_KERNEL); | ||
73 | err = -ENOMEM; | ||
74 | if (!node) | ||
75 | goto unlock; | ||
76 | |||
77 | type->ops->owner = THIS_MODULE; | ||
78 | node->type = type; | ||
79 | list_add(&node->list, &alg_types); | ||
80 | err = 0; | ||
81 | |||
82 | unlock: | ||
83 | up_write(&alg_types_sem); | ||
84 | |||
85 | return err; | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(af_alg_register_type); | ||
88 | |||
89 | int af_alg_unregister_type(const struct af_alg_type *type) | ||
90 | { | ||
91 | struct alg_type_list *node; | ||
92 | int err = -ENOENT; | ||
93 | |||
94 | down_write(&alg_types_sem); | ||
95 | list_for_each_entry(node, &alg_types, list) { | ||
96 | if (strcmp(node->type->name, type->name)) | ||
97 | continue; | ||
98 | |||
99 | list_del(&node->list); | ||
100 | kfree(node); | ||
101 | err = 0; | ||
102 | break; | ||
103 | } | ||
104 | up_write(&alg_types_sem); | ||
105 | |||
106 | return err; | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(af_alg_unregister_type); | ||
109 | |||
110 | static void alg_do_release(const struct af_alg_type *type, void *private) | ||
111 | { | ||
112 | if (!type) | ||
113 | return; | ||
114 | |||
115 | type->release(private); | ||
116 | module_put(type->owner); | ||
117 | } | ||
118 | |||
119 | int af_alg_release(struct socket *sock) | ||
120 | { | ||
121 | if (sock->sk) | ||
122 | sock_put(sock->sk); | ||
123 | return 0; | ||
124 | } | ||
125 | EXPORT_SYMBOL_GPL(af_alg_release); | ||
126 | |||
127 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | ||
128 | { | ||
129 | struct sock *sk = sock->sk; | ||
130 | struct alg_sock *ask = alg_sk(sk); | ||
131 | struct sockaddr_alg *sa = (void *)uaddr; | ||
132 | const struct af_alg_type *type; | ||
133 | void *private; | ||
134 | |||
135 | if (sock->state == SS_CONNECTED) | ||
136 | return -EINVAL; | ||
137 | |||
138 | if (addr_len != sizeof(*sa)) | ||
139 | return -EINVAL; | ||
140 | |||
141 | sa->salg_type[sizeof(sa->salg_type) - 1] = 0; | ||
142 | sa->salg_name[sizeof(sa->salg_name) - 1] = 0; | ||
143 | |||
144 | type = alg_get_type(sa->salg_type); | ||
145 | if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) { | ||
146 | request_module("algif-%s", sa->salg_type); | ||
147 | type = alg_get_type(sa->salg_type); | ||
148 | } | ||
149 | |||
150 | if (IS_ERR(type)) | ||
151 | return PTR_ERR(type); | ||
152 | |||
153 | private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); | ||
154 | if (IS_ERR(private)) { | ||
155 | module_put(type->owner); | ||
156 | return PTR_ERR(private); | ||
157 | } | ||
158 | |||
159 | lock_sock(sk); | ||
160 | |||
161 | swap(ask->type, type); | ||
162 | swap(ask->private, private); | ||
163 | |||
164 | release_sock(sk); | ||
165 | |||
166 | alg_do_release(type, private); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static int alg_setkey(struct sock *sk, char __user *ukey, | ||
172 | unsigned int keylen) | ||
173 | { | ||
174 | struct alg_sock *ask = alg_sk(sk); | ||
175 | const struct af_alg_type *type = ask->type; | ||
176 | u8 *key; | ||
177 | int err; | ||
178 | |||
179 | key = sock_kmalloc(sk, keylen, GFP_KERNEL); | ||
180 | if (!key) | ||
181 | return -ENOMEM; | ||
182 | |||
183 | err = -EFAULT; | ||
184 | if (copy_from_user(key, ukey, keylen)) | ||
185 | goto out; | ||
186 | |||
187 | err = type->setkey(ask->private, key, keylen); | ||
188 | |||
189 | out: | ||
190 | sock_kfree_s(sk, key, keylen); | ||
191 | |||
192 | return err; | ||
193 | } | ||
194 | |||
195 | static int alg_setsockopt(struct socket *sock, int level, int optname, | ||
196 | char __user *optval, unsigned int optlen) | ||
197 | { | ||
198 | struct sock *sk = sock->sk; | ||
199 | struct alg_sock *ask = alg_sk(sk); | ||
200 | const struct af_alg_type *type; | ||
201 | int err = -ENOPROTOOPT; | ||
202 | |||
203 | lock_sock(sk); | ||
204 | type = ask->type; | ||
205 | |||
206 | if (level != SOL_ALG || !type) | ||
207 | goto unlock; | ||
208 | |||
209 | switch (optname) { | ||
210 | case ALG_SET_KEY: | ||
211 | if (sock->state == SS_CONNECTED) | ||
212 | goto unlock; | ||
213 | if (!type->setkey) | ||
214 | goto unlock; | ||
215 | |||
216 | err = alg_setkey(sk, optval, optlen); | ||
217 | } | ||
218 | |||
219 | unlock: | ||
220 | release_sock(sk); | ||
221 | |||
222 | return err; | ||
223 | } | ||
224 | |||
225 | int af_alg_accept(struct sock *sk, struct socket *newsock) | ||
226 | { | ||
227 | struct alg_sock *ask = alg_sk(sk); | ||
228 | const struct af_alg_type *type; | ||
229 | struct sock *sk2; | ||
230 | int err; | ||
231 | |||
232 | lock_sock(sk); | ||
233 | type = ask->type; | ||
234 | |||
235 | err = -EINVAL; | ||
236 | if (!type) | ||
237 | goto unlock; | ||
238 | |||
239 | sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto); | ||
240 | err = -ENOMEM; | ||
241 | if (!sk2) | ||
242 | goto unlock; | ||
243 | |||
244 | sock_init_data(newsock, sk2); | ||
245 | sock_graft(sk2, newsock); | ||
246 | |||
247 | err = type->accept(ask->private, sk2); | ||
248 | if (err) { | ||
249 | sk_free(sk2); | ||
250 | goto unlock; | ||
251 | } | ||
252 | |||
253 | sk2->sk_family = PF_ALG; | ||
254 | |||
255 | sock_hold(sk); | ||
256 | alg_sk(sk2)->parent = sk; | ||
257 | alg_sk(sk2)->type = type; | ||
258 | |||
259 | newsock->ops = type->ops; | ||
260 | newsock->state = SS_CONNECTED; | ||
261 | |||
262 | err = 0; | ||
263 | |||
264 | unlock: | ||
265 | release_sock(sk); | ||
266 | |||
267 | return err; | ||
268 | } | ||
269 | EXPORT_SYMBOL_GPL(af_alg_accept); | ||
270 | |||
271 | static int alg_accept(struct socket *sock, struct socket *newsock, int flags) | ||
272 | { | ||
273 | return af_alg_accept(sock->sk, newsock); | ||
274 | } | ||
275 | |||
276 | static const struct proto_ops alg_proto_ops = { | ||
277 | .family = PF_ALG, | ||
278 | .owner = THIS_MODULE, | ||
279 | |||
280 | .connect = sock_no_connect, | ||
281 | .socketpair = sock_no_socketpair, | ||
282 | .getname = sock_no_getname, | ||
283 | .ioctl = sock_no_ioctl, | ||
284 | .listen = sock_no_listen, | ||
285 | .shutdown = sock_no_shutdown, | ||
286 | .getsockopt = sock_no_getsockopt, | ||
287 | .mmap = sock_no_mmap, | ||
288 | .sendpage = sock_no_sendpage, | ||
289 | .sendmsg = sock_no_sendmsg, | ||
290 | .recvmsg = sock_no_recvmsg, | ||
291 | .poll = sock_no_poll, | ||
292 | |||
293 | .bind = alg_bind, | ||
294 | .release = af_alg_release, | ||
295 | .setsockopt = alg_setsockopt, | ||
296 | .accept = alg_accept, | ||
297 | }; | ||
298 | |||
299 | static void alg_sock_destruct(struct sock *sk) | ||
300 | { | ||
301 | struct alg_sock *ask = alg_sk(sk); | ||
302 | |||
303 | alg_do_release(ask->type, ask->private); | ||
304 | } | ||
305 | |||
306 | static int alg_create(struct net *net, struct socket *sock, int protocol, | ||
307 | int kern) | ||
308 | { | ||
309 | struct sock *sk; | ||
310 | int err; | ||
311 | |||
312 | if (sock->type != SOCK_SEQPACKET) | ||
313 | return -ESOCKTNOSUPPORT; | ||
314 | if (protocol != 0) | ||
315 | return -EPROTONOSUPPORT; | ||
316 | |||
317 | err = -ENOMEM; | ||
318 | sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto); | ||
319 | if (!sk) | ||
320 | goto out; | ||
321 | |||
322 | sock->ops = &alg_proto_ops; | ||
323 | sock_init_data(sock, sk); | ||
324 | |||
325 | sk->sk_family = PF_ALG; | ||
326 | sk->sk_destruct = alg_sock_destruct; | ||
327 | |||
328 | return 0; | ||
329 | out: | ||
330 | return err; | ||
331 | } | ||
332 | |||
333 | static const struct net_proto_family alg_family = { | ||
334 | .family = PF_ALG, | ||
335 | .create = alg_create, | ||
336 | .owner = THIS_MODULE, | ||
337 | }; | ||
338 | |||
339 | int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, | ||
340 | int write) | ||
341 | { | ||
342 | unsigned long from = (unsigned long)addr; | ||
343 | unsigned long npages; | ||
344 | unsigned off; | ||
345 | int err; | ||
346 | int i; | ||
347 | |||
348 | err = -EFAULT; | ||
349 | if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len)) | ||
350 | goto out; | ||
351 | |||
352 | off = from & ~PAGE_MASK; | ||
353 | npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
354 | if (npages > ALG_MAX_PAGES) | ||
355 | npages = ALG_MAX_PAGES; | ||
356 | |||
357 | err = get_user_pages_fast(from, npages, write, sgl->pages); | ||
358 | if (err < 0) | ||
359 | goto out; | ||
360 | |||
361 | npages = err; | ||
362 | err = -EINVAL; | ||
363 | if (WARN_ON(npages == 0)) | ||
364 | goto out; | ||
365 | |||
366 | err = 0; | ||
367 | |||
368 | sg_init_table(sgl->sg, npages); | ||
369 | |||
370 | for (i = 0; i < npages; i++) { | ||
371 | int plen = min_t(int, len, PAGE_SIZE - off); | ||
372 | |||
373 | sg_set_page(sgl->sg + i, sgl->pages[i], plen, off); | ||
374 | |||
375 | off = 0; | ||
376 | len -= plen; | ||
377 | err += plen; | ||
378 | } | ||
379 | |||
380 | out: | ||
381 | return err; | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(af_alg_make_sg); | ||
384 | |||
385 | void af_alg_free_sg(struct af_alg_sgl *sgl) | ||
386 | { | ||
387 | int i; | ||
388 | |||
389 | i = 0; | ||
390 | do { | ||
391 | put_page(sgl->pages[i]); | ||
392 | } while (!sg_is_last(sgl->sg + (i++))); | ||
393 | } | ||
394 | EXPORT_SYMBOL_GPL(af_alg_free_sg); | ||
395 | |||
396 | int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) | ||
397 | { | ||
398 | struct cmsghdr *cmsg; | ||
399 | |||
400 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { | ||
401 | if (!CMSG_OK(msg, cmsg)) | ||
402 | return -EINVAL; | ||
403 | if (cmsg->cmsg_level != SOL_ALG) | ||
404 | continue; | ||
405 | |||
406 | switch(cmsg->cmsg_type) { | ||
407 | case ALG_SET_IV: | ||
408 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) | ||
409 | return -EINVAL; | ||
410 | con->iv = (void *)CMSG_DATA(cmsg); | ||
411 | if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen + | ||
412 | sizeof(*con->iv))) | ||
413 | return -EINVAL; | ||
414 | break; | ||
415 | |||
416 | case ALG_SET_OP: | ||
417 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) | ||
418 | return -EINVAL; | ||
419 | con->op = *(u32 *)CMSG_DATA(cmsg); | ||
420 | break; | ||
421 | |||
422 | default: | ||
423 | return -EINVAL; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | EXPORT_SYMBOL_GPL(af_alg_cmsg_send); | ||
430 | |||
431 | int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) | ||
432 | { | ||
433 | switch (err) { | ||
434 | case -EINPROGRESS: | ||
435 | case -EBUSY: | ||
436 | wait_for_completion(&completion->completion); | ||
437 | INIT_COMPLETION(completion->completion); | ||
438 | err = completion->err; | ||
439 | break; | ||
440 | }; | ||
441 | |||
442 | return err; | ||
443 | } | ||
444 | EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); | ||
445 | |||
446 | void af_alg_complete(struct crypto_async_request *req, int err) | ||
447 | { | ||
448 | struct af_alg_completion *completion = req->data; | ||
449 | |||
450 | completion->err = err; | ||
451 | complete(&completion->completion); | ||
452 | } | ||
453 | EXPORT_SYMBOL_GPL(af_alg_complete); | ||
454 | |||
455 | static int __init af_alg_init(void) | ||
456 | { | ||
457 | int err = proto_register(&alg_proto, 0); | ||
458 | |||
459 | if (err) | ||
460 | goto out; | ||
461 | |||
462 | err = sock_register(&alg_family); | ||
463 | if (err != 0) | ||
464 | goto out_unregister_proto; | ||
465 | |||
466 | out: | ||
467 | return err; | ||
468 | |||
469 | out_unregister_proto: | ||
470 | proto_unregister(&alg_proto); | ||
471 | goto out; | ||
472 | } | ||
473 | |||
474 | static void __exit af_alg_exit(void) | ||
475 | { | ||
476 | sock_unregister(PF_ALG); | ||
477 | proto_unregister(&alg_proto); | ||
478 | } | ||
479 | |||
480 | module_init(af_alg_init); | ||
481 | module_exit(af_alg_exit); | ||
482 | MODULE_LICENSE("GPL"); | ||
483 | MODULE_ALIAS_NETPROTO(AF_ALG); | ||
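
The file above supplies only the generic plumbing: alg_bind() resolves the type/name pair, alg_setsockopt() installs keys via ALG_SET_KEY, and af_alg_accept() spawns per-operation sockets. A hedged user-space sketch of that key-then-accept sequence follows; SOL_ALG is assumed to be 279, as in the uapi headers matching this interface, and tfmfd stands for an already-bound AF_ALG socket as in the earlier sketch:

```c
/*
 * Hypothetical user-space counterpart of alg_setsockopt()/alg_setkey()
 * and af_alg_accept() above (not part of this patch).  The key is set
 * on the bound "transform" socket; every accept() then hands back an
 * operation socket that shares the keyed transform.
 */
#include <linux/if_alg.h>
#include <sys/socket.h>

#ifndef SOL_ALG
#define SOL_ALG 279	/* assumption: value from the matching uapi headers */
#endif

/* tfmfd: an AF_ALG socket already bound to e.g. "skcipher"/"cbc(aes)" */
static int install_key_and_open_op(int tfmfd)
{
	static const unsigned char key[16] = { 0 };	/* demo key only */

	/* Travels alg_setsockopt() -> alg_setkey() -> type->setkey(). */
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
		return -1;

	/* alg_accept() -> af_alg_accept() -> type->accept(). */
	return accept(tfmfd, NULL, 0);
}
```
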
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
new file mode 100644
index 000000000000..62122a1a2f7a
--- /dev/null
+++ b/crypto/algif_hash.c
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * algif_hash: User-space interface for hash algorithms | ||
3 | * | ||
4 | * This file provides the user-space API for hash algorithms. | ||
5 | * | ||
6 | * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <crypto/hash.h> | ||
16 | #include <crypto/if_alg.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/net.h> | ||
22 | #include <net/sock.h> | ||
23 | |||
24 | struct hash_ctx { | ||
25 | struct af_alg_sgl sgl; | ||
26 | |||
27 | u8 *result; | ||
28 | |||
29 | struct af_alg_completion completion; | ||
30 | |||
31 | unsigned int len; | ||
32 | bool more; | ||
33 | |||
34 | struct ahash_request req; | ||
35 | }; | ||
36 | |||
37 | static int hash_sendmsg(struct kiocb *unused, struct socket *sock, | ||
38 | struct msghdr *msg, size_t ignored) | ||
39 | { | ||
40 | int limit = ALG_MAX_PAGES * PAGE_SIZE; | ||
41 | struct sock *sk = sock->sk; | ||
42 | struct alg_sock *ask = alg_sk(sk); | ||
43 | struct hash_ctx *ctx = ask->private; | ||
44 | unsigned long iovlen; | ||
45 | struct iovec *iov; | ||
46 | long copied = 0; | ||
47 | int err; | ||
48 | |||
49 | if (limit > sk->sk_sndbuf) | ||
50 | limit = sk->sk_sndbuf; | ||
51 | |||
52 | lock_sock(sk); | ||
53 | if (!ctx->more) { | ||
54 | err = crypto_ahash_init(&ctx->req); | ||
55 | if (err) | ||
56 | goto unlock; | ||
57 | } | ||
58 | |||
59 | ctx->more = 0; | ||
60 | |||
61 | for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; | ||
62 | iovlen--, iov++) { | ||
63 | unsigned long seglen = iov->iov_len; | ||
64 | char __user *from = iov->iov_base; | ||
65 | |||
66 | while (seglen) { | ||
67 | int len = min_t(unsigned long, seglen, limit); | ||
68 | int newlen; | ||
69 | |||
70 | newlen = af_alg_make_sg(&ctx->sgl, from, len, 0); | ||
71 | if (newlen < 0) | ||
72 | goto unlock; | ||
73 | |||
74 | ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, | ||
75 | newlen); | ||
76 | |||
77 | err = af_alg_wait_for_completion( | ||
78 | crypto_ahash_update(&ctx->req), | ||
79 | &ctx->completion); | ||
80 | |||
81 | af_alg_free_sg(&ctx->sgl); | ||
82 | |||
83 | if (err) | ||
84 | goto unlock; | ||
85 | |||
86 | seglen -= newlen; | ||
87 | from += newlen; | ||
88 | copied += newlen; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | err = 0; | ||
93 | |||
94 | ctx->more = msg->msg_flags & MSG_MORE; | ||
95 | if (!ctx->more) { | ||
96 | ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); | ||
97 | err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), | ||
98 | &ctx->completion); | ||
99 | } | ||
100 | |||
101 | unlock: | ||
102 | release_sock(sk); | ||
103 | |||
104 | return err ?: copied; | ||
105 | } | ||
106 | |||
107 | static ssize_t hash_sendpage(struct socket *sock, struct page *page, | ||
108 | int offset, size_t size, int flags) | ||
109 | { | ||
110 | struct sock *sk = sock->sk; | ||
111 | struct alg_sock *ask = alg_sk(sk); | ||
112 | struct hash_ctx *ctx = ask->private; | ||
113 | int err; | ||
114 | |||
115 | lock_sock(sk); | ||
116 | sg_init_table(ctx->sgl.sg, 1); | ||
117 | sg_set_page(ctx->sgl.sg, page, size, offset); | ||
118 | |||
119 | ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size); | ||
120 | |||
121 | if (!(flags & MSG_MORE)) { | ||
122 | if (ctx->more) | ||
123 | err = crypto_ahash_finup(&ctx->req); | ||
124 | else | ||
125 | err = crypto_ahash_digest(&ctx->req); | ||
126 | } else { | ||
127 | if (!ctx->more) { | ||
128 | err = crypto_ahash_init(&ctx->req); | ||
129 | if (err) | ||
130 | goto unlock; | ||
131 | } | ||
132 | |||
133 | err = crypto_ahash_update(&ctx->req); | ||
134 | } | ||
135 | |||
136 | err = af_alg_wait_for_completion(err, &ctx->completion); | ||
137 | if (err) | ||
138 | goto unlock; | ||
139 | |||
140 | ctx->more = flags & MSG_MORE; | ||
141 | |||
142 | unlock: | ||
143 | release_sock(sk); | ||
144 | |||
145 | return err ?: size; | ||
146 | } | ||
147 | |||
148 | static int hash_recvmsg(struct kiocb *unused, struct socket *sock, | ||
149 | struct msghdr *msg, size_t len, int flags) | ||
150 | { | ||
151 | struct sock *sk = sock->sk; | ||
152 | struct alg_sock *ask = alg_sk(sk); | ||
153 | struct hash_ctx *ctx = ask->private; | ||
154 | unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); | ||
155 | int err; | ||
156 | |||
157 | if (len > ds) | ||
158 | len = ds; | ||
159 | else if (len < ds) | ||
160 | msg->msg_flags |= MSG_TRUNC; | ||
161 | |||
162 | lock_sock(sk); | ||
163 | if (ctx->more) { | ||
164 | ctx->more = 0; | ||
165 | ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); | ||
166 | err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), | ||
167 | &ctx->completion); | ||
168 | if (err) | ||
169 | goto unlock; | ||
170 | } | ||
171 | |||
172 | err = memcpy_toiovec(msg->msg_iov, ctx->result, len); | ||
173 | |||
174 | unlock: | ||
175 | release_sock(sk); | ||
176 | |||
177 | return err ?: len; | ||
178 | } | ||
179 | |||
180 | static int hash_accept(struct socket *sock, struct socket *newsock, int flags) | ||
181 | { | ||
182 | struct sock *sk = sock->sk; | ||
183 | struct alg_sock *ask = alg_sk(sk); | ||
184 | struct hash_ctx *ctx = ask->private; | ||
185 | struct ahash_request *req = &ctx->req; | ||
186 | char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; | ||
187 | struct sock *sk2; | ||
188 | struct alg_sock *ask2; | ||
189 | struct hash_ctx *ctx2; | ||
190 | int err; | ||
191 | |||
192 | err = crypto_ahash_export(req, state); | ||
193 | if (err) | ||
194 | return err; | ||
195 | |||
196 | err = af_alg_accept(ask->parent, newsock); | ||
197 | if (err) | ||
198 | return err; | ||
199 | |||
200 | sk2 = newsock->sk; | ||
201 | ask2 = alg_sk(sk2); | ||
202 | ctx2 = ask2->private; | ||
203 | ctx2->more = 1; | ||
204 | |||
205 | err = crypto_ahash_import(&ctx2->req, state); | ||
206 | if (err) { | ||
207 | sock_orphan(sk2); | ||
208 | sock_put(sk2); | ||
209 | } | ||
210 | |||
211 | return err; | ||
212 | } | ||
213 | |||
214 | static struct proto_ops algif_hash_ops = { | ||
215 | .family = PF_ALG, | ||
216 | |||
217 | .connect = sock_no_connect, | ||
218 | .socketpair = sock_no_socketpair, | ||
219 | .getname = sock_no_getname, | ||
220 | .ioctl = sock_no_ioctl, | ||
221 | .listen = sock_no_listen, | ||
222 | .shutdown = sock_no_shutdown, | ||
223 | .getsockopt = sock_no_getsockopt, | ||
224 | .mmap = sock_no_mmap, | ||
225 | .bind = sock_no_bind, | ||
226 | .setsockopt = sock_no_setsockopt, | ||
227 | .poll = sock_no_poll, | ||
228 | |||
229 | .release = af_alg_release, | ||
230 | .sendmsg = hash_sendmsg, | ||
231 | .sendpage = hash_sendpage, | ||
232 | .recvmsg = hash_recvmsg, | ||
233 | .accept = hash_accept, | ||
234 | }; | ||
235 | |||
236 | static void *hash_bind(const char *name, u32 type, u32 mask) | ||
237 | { | ||
238 | return crypto_alloc_ahash(name, type, mask); | ||
239 | } | ||
240 | |||
241 | static void hash_release(void *private) | ||
242 | { | ||
243 | crypto_free_ahash(private); | ||
244 | } | ||
245 | |||
246 | static int hash_setkey(void *private, const u8 *key, unsigned int keylen) | ||
247 | { | ||
248 | return crypto_ahash_setkey(private, key, keylen); | ||
249 | } | ||
250 | |||
251 | static void hash_sock_destruct(struct sock *sk) | ||
252 | { | ||
253 | struct alg_sock *ask = alg_sk(sk); | ||
254 | struct hash_ctx *ctx = ask->private; | ||
255 | |||
256 | sock_kfree_s(sk, ctx->result, | ||
257 | crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); | ||
258 | sock_kfree_s(sk, ctx, ctx->len); | ||
259 | af_alg_release_parent(sk); | ||
260 | } | ||
261 | |||
262 | static int hash_accept_parent(void *private, struct sock *sk) | ||
263 | { | ||
264 | struct hash_ctx *ctx; | ||
265 | struct alg_sock *ask = alg_sk(sk); | ||
266 | unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private); | ||
267 | unsigned ds = crypto_ahash_digestsize(private); | ||
268 | |||
269 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | ||
270 | if (!ctx) | ||
271 | return -ENOMEM; | ||
272 | |||
273 | ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); | ||
274 | if (!ctx->result) { | ||
275 | sock_kfree_s(sk, ctx, len); | ||
276 | return -ENOMEM; | ||
277 | } | ||
278 | |||
279 | memset(ctx->result, 0, ds); | ||
280 | |||
281 | ctx->len = len; | ||
282 | ctx->more = 0; | ||
283 | af_alg_init_completion(&ctx->completion); | ||
284 | |||
285 | ask->private = ctx; | ||
286 | |||
287 | ahash_request_set_tfm(&ctx->req, private); | ||
288 | ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
289 | af_alg_complete, &ctx->completion); | ||
290 | |||
291 | sk->sk_destruct = hash_sock_destruct; | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static const struct af_alg_type algif_type_hash = { | ||
297 | .bind = hash_bind, | ||
298 | .release = hash_release, | ||
299 | .setkey = hash_setkey, | ||
300 | .accept = hash_accept_parent, | ||
301 | .ops = &algif_hash_ops, | ||
302 | .name = "hash", | ||
303 | .owner = THIS_MODULE | ||
304 | }; | ||
305 | |||
306 | static int __init algif_hash_init(void) | ||
307 | { | ||
308 | return af_alg_register_type(&algif_type_hash); | ||
309 | } | ||
310 | |||
311 | static void __exit algif_hash_exit(void) | ||
312 | { | ||
313 | int err = af_alg_unregister_type(&algif_type_hash); | ||
314 | BUG_ON(err); | ||
315 | } | ||
316 | |||
317 | module_init(algif_hash_init); | ||
318 | module_exit(algif_hash_exit); | ||
319 | MODULE_LICENSE("GPL"); | ||
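
A minimal user-space run of the hash interface above, again as a hypothetical sketch rather than anything shipped with this patch (error checks trimmed; AF_ALG = 38 is an assumption about the accompanying socket.h change):

```c
/*
 * Hypothetical sketch: MSG_MORE keeps ctx->more set so hash_sendmsg()
 * only calls crypto_ahash_update(); the final send() drives
 * crypto_ahash_final(); the read() lands in hash_recvmsg().
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];	/* SHA-1 digest size */
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	send(opfd, "abc", 3, MSG_MORE);		/* update */
	send(opfd, "def", 3, MSG_MORE);		/* update */
	send(opfd, "ghi", 3, 0);		/* update + final */

	if (read(opfd, digest, sizeof(digest)) != sizeof(digest)) {
		perror("read digest");
		return 1;
	}

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
```

Reading fewer bytes than the digest size would set MSG_TRUNC, mirroring the len < ds branch in hash_recvmsg().
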
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 000000000000..6a6dfc062d2a
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,632 @@ | |||
1 | /* | ||
2 | * algif_skcipher: User-space interface for skcipher algorithms | ||
3 | * | ||
4 | * This file provides the user-space API for symmetric key ciphers. | ||
5 | * | ||
6 | * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <crypto/scatterwalk.h> | ||
16 | #include <crypto/skcipher.h> | ||
17 | #include <crypto/if_alg.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/net.h> | ||
24 | #include <net/sock.h> | ||
25 | |||
26 | struct skcipher_sg_list { | ||
27 | struct list_head list; | ||
28 | |||
29 | int cur; | ||
30 | |||
31 | struct scatterlist sg[0]; | ||
32 | }; | ||
33 | |||
34 | struct skcipher_ctx { | ||
35 | struct list_head tsgl; | ||
36 | struct af_alg_sgl rsgl; | ||
37 | |||
38 | void *iv; | ||
39 | |||
40 | struct af_alg_completion completion; | ||
41 | |||
42 | unsigned used; | ||
43 | |||
44 | unsigned int len; | ||
45 | bool more; | ||
46 | bool merge; | ||
47 | bool enc; | ||
48 | |||
49 | struct ablkcipher_request req; | ||
50 | }; | ||
51 | |||
52 | #define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \ | ||
53 | sizeof(struct scatterlist) - 1) | ||
54 | |||
55 | static inline int skcipher_sndbuf(struct sock *sk) | ||
56 | { | ||
57 | struct alg_sock *ask = alg_sk(sk); | ||
58 | struct skcipher_ctx *ctx = ask->private; | ||
59 | |||
60 | return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - | ||
61 | ctx->used, 0); | ||
62 | } | ||
63 | |||
64 | static inline bool skcipher_writable(struct sock *sk) | ||
65 | { | ||
66 | return PAGE_SIZE <= skcipher_sndbuf(sk); | ||
67 | } | ||
68 | |||
69 | static int skcipher_alloc_sgl(struct sock *sk) | ||
70 | { | ||
71 | struct alg_sock *ask = alg_sk(sk); | ||
72 | struct skcipher_ctx *ctx = ask->private; | ||
73 | struct skcipher_sg_list *sgl; | ||
74 | struct scatterlist *sg = NULL; | ||
75 | |||
76 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | ||
77 | if (!list_empty(&ctx->tsgl)) | ||
78 | sg = sgl->sg; | ||
79 | |||
80 | if (!sg || sgl->cur >= MAX_SGL_ENTS) { | ||
81 | sgl = sock_kmalloc(sk, sizeof(*sgl) + | ||
82 | sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), | ||
83 | GFP_KERNEL); | ||
84 | if (!sgl) | ||
85 | return -ENOMEM; | ||
86 | |||
87 | sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); | ||
88 | sgl->cur = 0; | ||
89 | |||
90 | if (sg) | ||
91 | scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); | ||
92 | |||
93 | list_add_tail(&sgl->list, &ctx->tsgl); | ||
94 | } | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static void skcipher_pull_sgl(struct sock *sk, int used) | ||
100 | { | ||
101 | struct alg_sock *ask = alg_sk(sk); | ||
102 | struct skcipher_ctx *ctx = ask->private; | ||
103 | struct skcipher_sg_list *sgl; | ||
104 | struct scatterlist *sg; | ||
105 | int i; | ||
106 | |||
107 | while (!list_empty(&ctx->tsgl)) { | ||
108 | sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, | ||
109 | list); | ||
110 | sg = sgl->sg; | ||
111 | |||
112 | for (i = 0; i < sgl->cur; i++) { | ||
113 | int plen = min_t(int, used, sg[i].length); | ||
114 | |||
115 | if (!sg_page(sg + i)) | ||
116 | continue; | ||
117 | |||
118 | sg[i].length -= plen; | ||
119 | sg[i].offset += plen; | ||
120 | |||
121 | used -= plen; | ||
122 | ctx->used -= plen; | ||
123 | |||
124 | if (sg[i].length) | ||
125 | return; | ||
126 | |||
127 | put_page(sg_page(sg + i)); | ||
128 | sg_assign_page(sg + i, NULL); | ||
129 | } | ||
130 | |||
131 | list_del(&sgl->list); | ||
132 | sock_kfree_s(sk, sgl, | ||
133 | sizeof(*sgl) + sizeof(sgl->sg[0]) * | ||
134 | (MAX_SGL_ENTS + 1)); | ||
135 | } | ||
136 | |||
137 | if (!ctx->used) | ||
138 | ctx->merge = 0; | ||
139 | } | ||
140 | |||
141 | static void skcipher_free_sgl(struct sock *sk) | ||
142 | { | ||
143 | struct alg_sock *ask = alg_sk(sk); | ||
144 | struct skcipher_ctx *ctx = ask->private; | ||
145 | |||
146 | skcipher_pull_sgl(sk, ctx->used); | ||
147 | } | ||
148 | |||
149 | static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) | ||
150 | { | ||
151 | long timeout; | ||
152 | DEFINE_WAIT(wait); | ||
153 | int err = -ERESTARTSYS; | ||
154 | |||
155 | if (flags & MSG_DONTWAIT) | ||
156 | return -EAGAIN; | ||
157 | |||
158 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | ||
159 | |||
160 | for (;;) { | ||
161 | if (signal_pending(current)) | ||
162 | break; | ||
163 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
164 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
165 | if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { | ||
166 | err = 0; | ||
167 | break; | ||
168 | } | ||
169 | } | ||
170 | finish_wait(sk_sleep(sk), &wait); | ||
171 | |||
172 | return err; | ||
173 | } | ||
174 | |||
175 | static void skcipher_wmem_wakeup(struct sock *sk) | ||
176 | { | ||
177 | struct socket_wq *wq; | ||
178 | |||
179 | if (!skcipher_writable(sk)) | ||
180 | return; | ||
181 | |||
182 | rcu_read_lock(); | ||
183 | wq = rcu_dereference(sk->sk_wq); | ||
184 | if (wq_has_sleeper(wq)) | ||
185 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
186 | POLLRDNORM | | ||
187 | POLLRDBAND); | ||
188 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | ||
189 | rcu_read_unlock(); | ||
190 | } | ||
191 | |||
192 | static int skcipher_wait_for_data(struct sock *sk, unsigned flags) | ||
193 | { | ||
194 | struct alg_sock *ask = alg_sk(sk); | ||
195 | struct skcipher_ctx *ctx = ask->private; | ||
196 | long timeout; | ||
197 | DEFINE_WAIT(wait); | ||
198 | int err = -ERESTARTSYS; | ||
199 | |||
200 | if (flags & MSG_DONTWAIT) { | ||
201 | return -EAGAIN; | ||
202 | } | ||
203 | |||
204 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | ||
205 | |||
206 | for (;;) { | ||
207 | if (signal_pending(current)) | ||
208 | break; | ||
209 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
210 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
211 | if (sk_wait_event(sk, &timeout, ctx->used)) { | ||
212 | err = 0; | ||
213 | break; | ||
214 | } | ||
215 | } | ||
216 | finish_wait(sk_sleep(sk), &wait); | ||
217 | |||
218 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | ||
219 | |||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static void skcipher_data_wakeup(struct sock *sk) | ||
224 | { | ||
225 | struct alg_sock *ask = alg_sk(sk); | ||
226 | struct skcipher_ctx *ctx = ask->private; | ||
227 | struct socket_wq *wq; | ||
228 | |||
229 | if (!ctx->used) | ||
230 | return; | ||
231 | |||
232 | rcu_read_lock(); | ||
233 | wq = rcu_dereference(sk->sk_wq); | ||
234 | if (wq_has_sleeper(wq)) | ||
235 | wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | | ||
236 | POLLRDNORM | | ||
237 | POLLRDBAND); | ||
238 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | ||
239 | rcu_read_unlock(); | ||
240 | } | ||
241 | |||
242 | static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, | ||
243 | struct msghdr *msg, size_t size) | ||
244 | { | ||
245 | struct sock *sk = sock->sk; | ||
246 | struct alg_sock *ask = alg_sk(sk); | ||
247 | struct skcipher_ctx *ctx = ask->private; | ||
248 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); | ||
249 | unsigned ivsize = crypto_ablkcipher_ivsize(tfm); | ||
250 | struct skcipher_sg_list *sgl; | ||
251 | struct af_alg_control con = {}; | ||
252 | long copied = 0; | ||
253 | bool enc = 0; | ||
254 | int err; | ||
255 | int i; | ||
256 | |||
257 | if (msg->msg_controllen) { | ||
258 | err = af_alg_cmsg_send(msg, &con); | ||
259 | if (err) | ||
260 | return err; | ||
261 | |||
262 | switch (con.op) { | ||
263 | case ALG_OP_ENCRYPT: | ||
264 | enc = 1; | ||
265 | break; | ||
266 | case ALG_OP_DECRYPT: | ||
267 | enc = 0; | ||
268 | break; | ||
269 | default: | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | |||
273 | if (con.iv && con.iv->ivlen != ivsize) | ||
274 | return -EINVAL; | ||
275 | } | ||
276 | |||
277 | err = -EINVAL; | ||
278 | |||
279 | lock_sock(sk); | ||
280 | if (!ctx->more && ctx->used) | ||
281 | goto unlock; | ||
282 | |||
283 | if (!ctx->used) { | ||
284 | ctx->enc = enc; | ||
285 | if (con.iv) | ||
286 | memcpy(ctx->iv, con.iv->iv, ivsize); | ||
287 | } | ||
288 | |||
289 | while (size) { | ||
290 | struct scatterlist *sg; | ||
291 | unsigned long len = size; | ||
292 | int plen; | ||
293 | |||
294 | if (ctx->merge) { | ||
295 | sgl = list_entry(ctx->tsgl.prev, | ||
296 | struct skcipher_sg_list, list); | ||
297 | sg = sgl->sg + sgl->cur - 1; | ||
298 | len = min_t(unsigned long, len, | ||
299 | PAGE_SIZE - sg->offset - sg->length); | ||
300 | |||
301 | err = memcpy_fromiovec(page_address(sg_page(sg)) + | ||
302 | sg->offset + sg->length, | ||
303 | msg->msg_iov, len); | ||
304 | if (err) | ||
305 | goto unlock; | ||
306 | |||
307 | sg->length += len; | ||
308 | ctx->merge = (sg->offset + sg->length) & | ||
309 | (PAGE_SIZE - 1); | ||
310 | |||
311 | ctx->used += len; | ||
312 | copied += len; | ||
313 | size -= len; | ||
314 | continue; | ||
315 | } | ||
316 | |||
317 | if (!skcipher_writable(sk)) { | ||
318 | err = skcipher_wait_for_wmem(sk, msg->msg_flags); | ||
319 | if (err) | ||
320 | goto unlock; | ||
321 | } | ||
322 | |||
323 | len = min_t(unsigned long, len, skcipher_sndbuf(sk)); | ||
324 | |||
325 | err = skcipher_alloc_sgl(sk); | ||
326 | if (err) | ||
327 | goto unlock; | ||
328 | |||
329 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | ||
330 | sg = sgl->sg; | ||
331 | do { | ||
332 | i = sgl->cur; | ||
333 | plen = min_t(int, len, PAGE_SIZE); | ||
334 | |||
335 | sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); | ||
336 | err = -ENOMEM; | ||
337 | if (!sg_page(sg + i)) | ||
338 | goto unlock; | ||
339 | |||
340 | err = memcpy_fromiovec(page_address(sg_page(sg + i)), | ||
341 | msg->msg_iov, plen); | ||
342 | if (err) { | ||
343 | __free_page(sg_page(sg + i)); | ||
344 | sg_assign_page(sg + i, NULL); | ||
345 | goto unlock; | ||
346 | } | ||
347 | |||
348 | sg[i].length = plen; | ||
349 | len -= plen; | ||
350 | ctx->used += plen; | ||
351 | copied += plen; | ||
352 | size -= plen; | ||
353 | sgl->cur++; | ||
354 | } while (len && sgl->cur < MAX_SGL_ENTS); | ||
355 | |||
356 | ctx->merge = plen & (PAGE_SIZE - 1); | ||
357 | } | ||
358 | |||
359 | err = 0; | ||
360 | |||
361 | ctx->more = msg->msg_flags & MSG_MORE; | ||
362 | if (!ctx->more && !list_empty(&ctx->tsgl)) | ||
363 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | ||
364 | |||
365 | unlock: | ||
366 | skcipher_data_wakeup(sk); | ||
367 | release_sock(sk); | ||
368 | |||
369 | return copied ?: err; | ||
370 | } | ||
371 | |||
372 | static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, | ||
373 | int offset, size_t size, int flags) | ||
374 | { | ||
375 | struct sock *sk = sock->sk; | ||
376 | struct alg_sock *ask = alg_sk(sk); | ||
377 | struct skcipher_ctx *ctx = ask->private; | ||
378 | struct skcipher_sg_list *sgl; | ||
379 | int err = -EINVAL; | ||
380 | |||
381 | lock_sock(sk); | ||
382 | if (!ctx->more && ctx->used) | ||
383 | goto unlock; | ||
384 | |||
385 | if (!size) | ||
386 | goto done; | ||
387 | |||
388 | if (!skcipher_writable(sk)) { | ||
389 | err = skcipher_wait_for_wmem(sk, flags); | ||
390 | if (err) | ||
391 | goto unlock; | ||
392 | } | ||
393 | |||
394 | err = skcipher_alloc_sgl(sk); | ||
395 | if (err) | ||
396 | goto unlock; | ||
397 | |||
398 | ctx->merge = 0; | ||
399 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | ||
400 | |||
401 | get_page(page); | ||
402 | sg_set_page(sgl->sg + sgl->cur, page, size, offset); | ||
403 | sgl->cur++; | ||
404 | ctx->used += size; | ||
405 | |||
406 | done: | ||
407 | ctx->more = flags & MSG_MORE; | ||
408 | if (!ctx->more && !list_empty(&ctx->tsgl)) | ||
409 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | ||
410 | |||
411 | unlock: | ||
412 | skcipher_data_wakeup(sk); | ||
413 | release_sock(sk); | ||
414 | |||
415 | return err ?: size; | ||
416 | } | ||
417 | |||
418 | static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, | ||
419 | struct msghdr *msg, size_t ignored, int flags) | ||
420 | { | ||
421 | struct sock *sk = sock->sk; | ||
422 | struct alg_sock *ask = alg_sk(sk); | ||
423 | struct skcipher_ctx *ctx = ask->private; | ||
424 | unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( | ||
425 | &ctx->req)); | ||
426 | struct skcipher_sg_list *sgl; | ||
427 | struct scatterlist *sg; | ||
428 | unsigned long iovlen; | ||
429 | struct iovec *iov; | ||
430 | int err = -EAGAIN; | ||
431 | int used; | ||
432 | long copied = 0; | ||
433 | |||
434 | lock_sock(sk); | ||
435 | for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; | ||
436 | iovlen--, iov++) { | ||
437 | unsigned long seglen = iov->iov_len; | ||
438 | char __user *from = iov->iov_base; | ||
439 | |||
440 | while (seglen) { | ||
441 | sgl = list_first_entry(&ctx->tsgl, | ||
442 | struct skcipher_sg_list, list); | ||
443 | sg = sgl->sg; | ||
444 | |||
445 | while (!sg->length) | ||
446 | sg++; | ||
447 | |||
448 | used = ctx->used; | ||
449 | if (!used) { | ||
450 | err = skcipher_wait_for_data(sk, flags); | ||
451 | if (err) | ||
452 | goto unlock; | ||
453 | } | ||
454 | |||
455 | used = min_t(unsigned long, used, seglen); | ||
456 | |||
457 | used = af_alg_make_sg(&ctx->rsgl, from, used, 1); | ||
458 | err = used; | ||
459 | if (err < 0) | ||
460 | goto unlock; | ||
461 | |||
462 | if (ctx->more || used < ctx->used) | ||
463 | used -= used % bs; | ||
464 | |||
465 | err = -EINVAL; | ||
466 | if (!used) | ||
467 | goto free; | ||
468 | |||
469 | ablkcipher_request_set_crypt(&ctx->req, sg, | ||
470 | ctx->rsgl.sg, used, | ||
471 | ctx->iv); | ||
472 | |||
473 | err = af_alg_wait_for_completion( | ||
474 | ctx->enc ? | ||
475 | crypto_ablkcipher_encrypt(&ctx->req) : | ||
476 | crypto_ablkcipher_decrypt(&ctx->req), | ||
477 | &ctx->completion); | ||
478 | |||
479 | free: | ||
480 | af_alg_free_sg(&ctx->rsgl); | ||
481 | |||
482 | if (err) | ||
483 | goto unlock; | ||
484 | |||
485 | copied += used; | ||
486 | from += used; | ||
487 | seglen -= used; | ||
488 | skcipher_pull_sgl(sk, used); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | err = 0; | ||
493 | |||
494 | unlock: | ||
495 | skcipher_wmem_wakeup(sk); | ||
496 | release_sock(sk); | ||
497 | |||
498 | return copied ?: err; | ||
499 | } | ||
500 | |||
501 | |||
502 | static unsigned int skcipher_poll(struct file *file, struct socket *sock, | ||
503 | poll_table *wait) | ||
504 | { | ||
505 | struct sock *sk = sock->sk; | ||
506 | struct alg_sock *ask = alg_sk(sk); | ||
507 | struct skcipher_ctx *ctx = ask->private; | ||
508 | unsigned int mask; | ||
509 | |||
510 | sock_poll_wait(file, sk_sleep(sk), wait); | ||
511 | mask = 0; | ||
512 | |||
513 | if (ctx->used) | ||
514 | mask |= POLLIN | POLLRDNORM; | ||
515 | |||
516 | if (skcipher_writable(sk)) | ||
517 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | ||
518 | |||
519 | return mask; | ||
520 | } | ||
521 | |||
522 | static struct proto_ops algif_skcipher_ops = { | ||
523 | .family = PF_ALG, | ||
524 | |||
525 | .connect = sock_no_connect, | ||
526 | .socketpair = sock_no_socketpair, | ||
527 | .getname = sock_no_getname, | ||
528 | .ioctl = sock_no_ioctl, | ||
529 | .listen = sock_no_listen, | ||
530 | .shutdown = sock_no_shutdown, | ||
531 | .getsockopt = sock_no_getsockopt, | ||
532 | .mmap = sock_no_mmap, | ||
533 | .bind = sock_no_bind, | ||
534 | .accept = sock_no_accept, | ||
535 | .setsockopt = sock_no_setsockopt, | ||
536 | |||
537 | .release = af_alg_release, | ||
538 | .sendmsg = skcipher_sendmsg, | ||
539 | .sendpage = skcipher_sendpage, | ||
540 | .recvmsg = skcipher_recvmsg, | ||
541 | .poll = skcipher_poll, | ||
542 | }; | ||
543 | |||
544 | static void *skcipher_bind(const char *name, u32 type, u32 mask) | ||
545 | { | ||
546 | return crypto_alloc_ablkcipher(name, type, mask); | ||
547 | } | ||
548 | |||
549 | static void skcipher_release(void *private) | ||
550 | { | ||
551 | crypto_free_ablkcipher(private); | ||
552 | } | ||
553 | |||
554 | static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) | ||
555 | { | ||
556 | return crypto_ablkcipher_setkey(private, key, keylen); | ||
557 | } | ||
558 | |||
559 | static void skcipher_sock_destruct(struct sock *sk) | ||
560 | { | ||
561 | struct alg_sock *ask = alg_sk(sk); | ||
562 | struct skcipher_ctx *ctx = ask->private; | ||
563 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); | ||
564 | |||
565 | skcipher_free_sgl(sk); | ||
566 | sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); | ||
567 | sock_kfree_s(sk, ctx, ctx->len); | ||
568 | af_alg_release_parent(sk); | ||
569 | } | ||
570 | |||
571 | static int skcipher_accept_parent(void *private, struct sock *sk) | ||
572 | { | ||
573 | struct skcipher_ctx *ctx; | ||
574 | struct alg_sock *ask = alg_sk(sk); | ||
575 | unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); | ||
576 | |||
577 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | ||
578 | if (!ctx) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), | ||
582 | GFP_KERNEL); | ||
583 | if (!ctx->iv) { | ||
584 | sock_kfree_s(sk, ctx, len); | ||
585 | return -ENOMEM; | ||
586 | } | ||
587 | |||
588 | memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); | ||
589 | |||
590 | INIT_LIST_HEAD(&ctx->tsgl); | ||
591 | ctx->len = len; | ||
592 | ctx->used = 0; | ||
593 | ctx->more = 0; | ||
594 | ctx->merge = 0; | ||
595 | ctx->enc = 0; | ||
596 | af_alg_init_completion(&ctx->completion); | ||
597 | |||
598 | ask->private = ctx; | ||
599 | |||
600 | ablkcipher_request_set_tfm(&ctx->req, private); | ||
601 | ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
602 | af_alg_complete, &ctx->completion); | ||
603 | |||
604 | sk->sk_destruct = skcipher_sock_destruct; | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | |||
609 | static const struct af_alg_type algif_type_skcipher = { | ||
610 | .bind = skcipher_bind, | ||
611 | .release = skcipher_release, | ||
612 | .setkey = skcipher_setkey, | ||
613 | .accept = skcipher_accept_parent, | ||
614 | .ops = &algif_skcipher_ops, | ||
615 | .name = "skcipher", | ||
616 | .owner = THIS_MODULE | ||
617 | }; | ||
618 | |||
619 | static int __init algif_skcipher_init(void) | ||
620 | { | ||
621 | return af_alg_register_type(&algif_type_skcipher); | ||
622 | } | ||
623 | |||
624 | static void __exit algif_skcipher_exit(void) | ||
625 | { | ||
626 | int err = af_alg_unregister_type(&algif_type_skcipher); | ||
627 | BUG_ON(err); | ||
628 | } | ||
629 | |||
630 | module_init(algif_skcipher_init); | ||
631 | module_exit(algif_skcipher_exit); | ||
632 | MODULE_LICENSE("GPL"); | ||
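For reference, the algif_skcipher interface added above is driven from userspace through an AF_ALG socket. The sketch below is a minimal, hypothetical example (one AES-CBC block, all-zero key and IV, no error handling); the ALG_* constants and struct af_alg_iv are taken from <linux/if_alg.h>, and the AF_ALG/SOL_ALG fallback values are assumed for libcs that do not yet define them.

#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",	/* handled by algif_skcipher */
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 }, iv[16] = { 0 };
	unsigned char pt[16] = "0123456789abcde", ct[16];
	char cbuf[CMSG_SPACE(sizeof(int)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))] = { 0 };
	struct msghdr msg = { 0 };
	struct af_alg_iv *ivp;
	struct cmsghdr *cmsg;
	struct iovec iov;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);	 /* transform socket */
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)); /* select cbc(aes) */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);			 /* request socket */

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);			 /* encrypt, not decrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);			 /* per-request IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	ivp = (void *)CMSG_DATA(cmsg);
	ivp->ivlen = sizeof(iv);
	memcpy(ivp->iv, iv, sizeof(iv));

	iov.iov_base = pt;
	iov.iov_len = sizeof(pt);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	sendmsg(opfd, &msg, 0);		/* queues plaintext via skcipher_sendmsg() */
	read(opfd, ct, sizeof(ct));	/* runs the skcipher_recvmsg() path above */

	close(opfd);
	close(tfmfd);
	return 0;
}

A decrypt request is identical apart from ALG_OP_DECRYPT, and skcipher_poll() above lets the same socket be multiplexed with poll()/select().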
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 2bc332142849..ffa0245e2abc 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
@@ -83,7 +83,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2, | |||
83 | } | 83 | } |
84 | /* | 84 | /* |
85 | * Returns DEFAULT_BLK_SZ bytes of random data per call | 85 | * Returns DEFAULT_BLK_SZ bytes of random data per call |
86 | * returns 0 if generation succeded, <0 if something went wrong | 86 | * returns 0 if generation succeeded, <0 if something went wrong |
87 | */ | 87 | */ |
88 | static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) | 88 | static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) |
89 | { | 89 | { |
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index 5de2ed13b35d..1b11abbb5c91 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig | |||
@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV | |||
24 | select ASYNC_PQ | 24 | select ASYNC_PQ |
25 | select ASYNC_XOR | 25 | select ASYNC_XOR |
26 | 26 | ||
27 | config ASYNC_RAID6_TEST | ||
28 | tristate "Self test for hardware accelerated raid6 recovery" | ||
29 | depends on ASYNC_RAID6_RECOV | ||
30 | select ASYNC_MEMCPY | ||
31 | ---help--- | ||
32 | This is a one-shot self test that permutes through the | ||
33 | recovery of all the possible two disk failure scenarios for a | ||
34 | N-disk array. Recovery is performed with the asynchronous | ||
35 | raid6 recovery routines, and will optionally use an offload | ||
36 | engine if one is available. | ||
37 | |||
38 | If unsure, say N. | ||
39 | |||
40 | config ASYNC_TX_DISABLE_PQ_VAL_DMA | 27 | config ASYNC_TX_DISABLE_PQ_VAL_DMA |
41 | bool | 28 | bool |
42 | 29 | ||
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 0ec1fb69d4ea..518c22bd9562 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
83 | 83 | ||
84 | memcpy(dest_buf, src_buf, len); | 84 | memcpy(dest_buf, src_buf, len); |
85 | 85 | ||
86 | kunmap_atomic(dest_buf, KM_USER0); | ||
87 | kunmap_atomic(src_buf, KM_USER1); | 86 | kunmap_atomic(src_buf, KM_USER1); |
87 | kunmap_atomic(dest_buf, KM_USER0); | ||
88 | 88 | ||
89 | async_tx_sync_epilog(submit); | 89 | async_tx_sync_epilog(submit); |
90 | } | 90 | } |
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 079ae8ca590b..bc28337fded2 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c | |||
@@ -94,7 +94,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
94 | if (unlikely(!tx)) | 94 | if (unlikely(!tx)) |
95 | async_tx_quiesce(&submit->depend_tx); | 95 | async_tx_quiesce(&submit->depend_tx); |
96 | 96 | ||
97 | /* spin wait for the preceeding transactions to complete */ | 97 | /* spin wait for the preceding transactions to complete */ |
98 | while (unlikely(!tx)) { | 98 | while (unlikely(!tx)) { |
99 | dma_async_issue_pending(chan); | 99 | dma_async_issue_pending(chan); |
100 | tx = dma->device_prep_dma_xor(chan, dma_dest, | 100 | tx = dma->device_prep_dma_xor(chan, dma_dest, |
diff --git a/crypto/authenc.c b/crypto/authenc.c index a5a22cfcd07b..5ef7ba6b6a76 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -107,20 +107,6 @@ badkey: | |||
107 | goto out; | 107 | goto out; |
108 | } | 108 | } |
109 | 109 | ||
110 | static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, | ||
111 | int chain) | ||
112 | { | ||
113 | if (chain) { | ||
114 | head->length += sg->length; | ||
115 | sg = scatterwalk_sg_next(sg); | ||
116 | } | ||
117 | |||
118 | if (sg) | ||
119 | scatterwalk_sg_chain(head, 2, sg); | ||
120 | else | ||
121 | sg_mark_end(head); | ||
122 | } | ||
123 | |||
124 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, | 110 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, |
125 | int err) | 111 | int err) |
126 | { | 112 | { |
@@ -345,7 +331,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
345 | if (ivsize) { | 331 | if (ivsize) { |
346 | sg_init_table(cipher, 2); | 332 | sg_init_table(cipher, 2); |
347 | sg_set_buf(cipher, iv, ivsize); | 333 | sg_set_buf(cipher, iv, ivsize); |
348 | authenc_chain(cipher, dst, vdst == iv + ivsize); | 334 | scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); |
349 | dst = cipher; | 335 | dst = cipher; |
350 | cryptlen += ivsize; | 336 | cryptlen += ivsize; |
351 | } | 337 | } |
@@ -354,7 +340,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
354 | authenc_ahash_fn = crypto_authenc_ahash; | 340 | authenc_ahash_fn = crypto_authenc_ahash; |
355 | sg_init_table(asg, 2); | 341 | sg_init_table(asg, 2); |
356 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | 342 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); |
357 | authenc_chain(asg, dst, 0); | 343 | scatterwalk_crypto_chain(asg, dst, 0, 2); |
358 | dst = asg; | 344 | dst = asg; |
359 | cryptlen += req->assoclen; | 345 | cryptlen += req->assoclen; |
360 | } | 346 | } |
@@ -499,7 +485,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
499 | if (ivsize) { | 485 | if (ivsize) { |
500 | sg_init_table(cipher, 2); | 486 | sg_init_table(cipher, 2); |
501 | sg_set_buf(cipher, iv, ivsize); | 487 | sg_set_buf(cipher, iv, ivsize); |
502 | authenc_chain(cipher, src, vsrc == iv + ivsize); | 488 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); |
503 | src = cipher; | 489 | src = cipher; |
504 | cryptlen += ivsize; | 490 | cryptlen += ivsize; |
505 | } | 491 | } |
@@ -508,7 +494,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
508 | authenc_ahash_fn = crypto_authenc_ahash; | 494 | authenc_ahash_fn = crypto_authenc_ahash; |
509 | sg_init_table(asg, 2); | 495 | sg_init_table(asg, 2); |
510 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | 496 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); |
511 | authenc_chain(asg, src, 0); | 497 | scatterwalk_crypto_chain(asg, src, 0, 2); |
512 | src = asg; | 498 | src = asg; |
513 | cryptlen += req->assoclen; | 499 | cryptlen += req->assoclen; |
514 | } | 500 | } |
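The authenc_chain() helper removed above is superseded by a generic scatterwalk_crypto_chain() helper. Its definition lives outside this crypto/ diffstat, but judging from the removed code and the new call sites it presumably just generalises the old helper by taking the scatterlist table size as an extra parameter, roughly:

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg,
					    int chain, int num)
{
	if (chain) {
		/* first entry of sg is contiguous with head, fold it in */
		head->length += sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	if (sg)
		/* chain the remainder behind the num-entry table */
		scatterwalk_sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

Both authenc.c here and the new authencesn.c below call it with num == 2 for their two-entry cipher/assoc tables.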
diff --git a/crypto/authencesn.c b/crypto/authencesn.c new file mode 100644 index 000000000000..136b68b9d8d4 --- /dev/null +++ b/crypto/authencesn.c | |||
@@ -0,0 +1,835 @@ | |||
1 | /* | ||
2 | * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers, | ||
3 | * derived from authenc.c | ||
4 | * | ||
5 | * Copyright (C) 2010 secunet Security Networks AG | ||
6 | * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <crypto/aead.h> | ||
16 | #include <crypto/internal/hash.h> | ||
17 | #include <crypto/internal/skcipher.h> | ||
18 | #include <crypto/authenc.h> | ||
19 | #include <crypto/scatterwalk.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/rtnetlink.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | |||
28 | struct authenc_esn_instance_ctx { | ||
29 | struct crypto_ahash_spawn auth; | ||
30 | struct crypto_skcipher_spawn enc; | ||
31 | }; | ||
32 | |||
33 | struct crypto_authenc_esn_ctx { | ||
34 | unsigned int reqoff; | ||
35 | struct crypto_ahash *auth; | ||
36 | struct crypto_ablkcipher *enc; | ||
37 | }; | ||
38 | |||
39 | struct authenc_esn_request_ctx { | ||
40 | unsigned int cryptlen; | ||
41 | unsigned int headlen; | ||
42 | unsigned int trailen; | ||
43 | struct scatterlist *sg; | ||
44 | struct scatterlist hsg[2]; | ||
45 | struct scatterlist tsg[1]; | ||
46 | struct scatterlist cipher[2]; | ||
47 | crypto_completion_t complete; | ||
48 | crypto_completion_t update_complete; | ||
49 | crypto_completion_t update_complete2; | ||
50 | char tail[]; | ||
51 | }; | ||
52 | |||
53 | static void authenc_esn_request_complete(struct aead_request *req, int err) | ||
54 | { | ||
55 | if (err != -EINPROGRESS) | ||
56 | aead_request_complete(req, err); | ||
57 | } | ||
58 | |||
59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, | ||
60 | unsigned int keylen) | ||
61 | { | ||
62 | unsigned int authkeylen; | ||
63 | unsigned int enckeylen; | ||
64 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
65 | struct crypto_ahash *auth = ctx->auth; | ||
66 | struct crypto_ablkcipher *enc = ctx->enc; | ||
67 | struct rtattr *rta = (void *)key; | ||
68 | struct crypto_authenc_key_param *param; | ||
69 | int err = -EINVAL; | ||
70 | |||
71 | if (!RTA_OK(rta, keylen)) | ||
72 | goto badkey; | ||
73 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
74 | goto badkey; | ||
75 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
76 | goto badkey; | ||
77 | |||
78 | param = RTA_DATA(rta); | ||
79 | enckeylen = be32_to_cpu(param->enckeylen); | ||
80 | |||
81 | key += RTA_ALIGN(rta->rta_len); | ||
82 | keylen -= RTA_ALIGN(rta->rta_len); | ||
83 | |||
84 | if (keylen < enckeylen) | ||
85 | goto badkey; | ||
86 | |||
87 | authkeylen = keylen - enckeylen; | ||
88 | |||
89 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | ||
90 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & | ||
91 | CRYPTO_TFM_REQ_MASK); | ||
92 | err = crypto_ahash_setkey(auth, key, authkeylen); | ||
93 | crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & | ||
94 | CRYPTO_TFM_RES_MASK); | ||
95 | |||
96 | if (err) | ||
97 | goto out; | ||
98 | |||
99 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); | ||
100 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & | ||
101 | CRYPTO_TFM_REQ_MASK); | ||
102 | err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); | ||
103 | crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & | ||
104 | CRYPTO_TFM_RES_MASK); | ||
105 | |||
106 | out: | ||
107 | return err; | ||
108 | |||
109 | badkey: | ||
110 | crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq, | ||
115 | int err) | ||
116 | { | ||
117 | struct aead_request *req = areq->data; | ||
118 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
119 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
120 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
121 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
122 | |||
123 | if (err) | ||
124 | goto out; | ||
125 | |||
126 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
127 | areq_ctx->cryptlen); | ||
128 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
129 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
130 | areq_ctx->update_complete2, req); | ||
131 | |||
132 | err = crypto_ahash_update(ahreq); | ||
133 | if (err) | ||
134 | goto out; | ||
135 | |||
136 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
137 | areq_ctx->trailen); | ||
138 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
139 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
140 | areq_ctx->complete, req); | ||
141 | |||
142 | err = crypto_ahash_finup(ahreq); | ||
143 | if (err) | ||
144 | goto out; | ||
145 | |||
146 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
147 | areq_ctx->cryptlen, | ||
148 | crypto_aead_authsize(authenc_esn), 1); | ||
149 | |||
150 | out: | ||
151 | authenc_esn_request_complete(req, err); | ||
152 | } | ||
153 | |||
154 | static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq, | ||
155 | int err) | ||
156 | { | ||
157 | struct aead_request *req = areq->data; | ||
158 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
159 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
160 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
161 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
162 | |||
163 | if (err) | ||
164 | goto out; | ||
165 | |||
166 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
167 | areq_ctx->trailen); | ||
168 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
169 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
170 | areq_ctx->complete, req); | ||
171 | |||
172 | err = crypto_ahash_finup(ahreq); | ||
173 | if (err) | ||
174 | goto out; | ||
175 | |||
176 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
177 | areq_ctx->cryptlen, | ||
178 | crypto_aead_authsize(authenc_esn), 1); | ||
179 | |||
180 | out: | ||
181 | authenc_esn_request_complete(req, err); | ||
182 | } | ||
183 | |||
184 | |||
185 | static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq, | ||
186 | int err) | ||
187 | { | ||
188 | struct aead_request *req = areq->data; | ||
189 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
190 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
191 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
192 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
193 | |||
194 | if (err) | ||
195 | goto out; | ||
196 | |||
197 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
198 | areq_ctx->cryptlen, | ||
199 | crypto_aead_authsize(authenc_esn), 1); | ||
200 | |||
201 | out: | ||
202 | aead_request_complete(req, err); | ||
203 | } | ||
204 | |||
205 | |||
206 | static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq, | ||
207 | int err) | ||
208 | { | ||
209 | u8 *ihash; | ||
210 | unsigned int authsize; | ||
211 | struct ablkcipher_request *abreq; | ||
212 | struct aead_request *req = areq->data; | ||
213 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
214 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
215 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
216 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
217 | unsigned int cryptlen = req->cryptlen; | ||
218 | |||
219 | if (err) | ||
220 | goto out; | ||
221 | |||
222 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
223 | areq_ctx->cryptlen); | ||
224 | |||
225 | ahash_request_set_callback(ahreq, | ||
226 | aead_request_flags(req) & | ||
227 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
228 | areq_ctx->update_complete2, req); | ||
229 | |||
230 | err = crypto_ahash_update(ahreq); | ||
231 | if (err) | ||
232 | goto out; | ||
233 | |||
234 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
235 | areq_ctx->trailen); | ||
236 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
237 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
238 | areq_ctx->complete, req); | ||
239 | |||
240 | err = crypto_ahash_finup(ahreq); | ||
241 | if (err) | ||
242 | goto out; | ||
243 | |||
244 | authsize = crypto_aead_authsize(authenc_esn); | ||
245 | cryptlen -= authsize; | ||
246 | ihash = ahreq->result + authsize; | ||
247 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
248 | authsize, 0); | ||
249 | |||
250 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
251 | if (err) | ||
252 | goto out; | ||
253 | |||
254 | abreq = aead_request_ctx(req); | ||
255 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
256 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
257 | req->base.complete, req->base.data); | ||
258 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
259 | cryptlen, req->iv); | ||
260 | |||
261 | err = crypto_ablkcipher_decrypt(abreq); | ||
262 | |||
263 | out: | ||
264 | authenc_esn_request_complete(req, err); | ||
265 | } | ||
266 | |||
267 | static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq, | ||
268 | int err) | ||
269 | { | ||
270 | u8 *ihash; | ||
271 | unsigned int authsize; | ||
272 | struct ablkcipher_request *abreq; | ||
273 | struct aead_request *req = areq->data; | ||
274 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
275 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
276 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
277 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
278 | unsigned int cryptlen = req->cryptlen; | ||
279 | |||
280 | if (err) | ||
281 | goto out; | ||
282 | |||
283 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
284 | areq_ctx->trailen); | ||
285 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
286 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
287 | areq_ctx->complete, req); | ||
288 | |||
289 | err = crypto_ahash_finup(ahreq); | ||
290 | if (err) | ||
291 | goto out; | ||
292 | |||
293 | authsize = crypto_aead_authsize(authenc_esn); | ||
294 | cryptlen -= authsize; | ||
295 | ihash = ahreq->result + authsize; | ||
296 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
297 | authsize, 0); | ||
298 | |||
299 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
300 | if (err) | ||
301 | goto out; | ||
302 | |||
303 | abreq = aead_request_ctx(req); | ||
304 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
305 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
306 | req->base.complete, req->base.data); | ||
307 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
308 | cryptlen, req->iv); | ||
309 | |||
310 | err = crypto_ablkcipher_decrypt(abreq); | ||
311 | |||
312 | out: | ||
313 | authenc_esn_request_complete(req, err); | ||
314 | } | ||
315 | |||
316 | |||
317 | static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | ||
318 | int err) | ||
319 | { | ||
320 | u8 *ihash; | ||
321 | unsigned int authsize; | ||
322 | struct ablkcipher_request *abreq; | ||
323 | struct aead_request *req = areq->data; | ||
324 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
325 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
326 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
327 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
328 | unsigned int cryptlen = req->cryptlen; | ||
329 | |||
330 | if (err) | ||
331 | goto out; | ||
332 | |||
333 | authsize = crypto_aead_authsize(authenc_esn); | ||
334 | cryptlen -= authsize; | ||
335 | ihash = ahreq->result + authsize; | ||
336 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
337 | authsize, 0); | ||
338 | |||
339 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
340 | if (err) | ||
341 | goto out; | ||
342 | |||
343 | abreq = aead_request_ctx(req); | ||
344 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
345 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
346 | req->base.complete, req->base.data); | ||
347 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
348 | cryptlen, req->iv); | ||
349 | |||
350 | err = crypto_ablkcipher_decrypt(abreq); | ||
351 | |||
352 | out: | ||
353 | authenc_esn_request_complete(req, err); | ||
354 | } | ||
355 | |||
356 | static u8 *crypto_authenc_esn_ahash(struct aead_request *req, | ||
357 | unsigned int flags) | ||
358 | { | ||
359 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
360 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
361 | struct crypto_ahash *auth = ctx->auth; | ||
362 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
363 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
364 | u8 *hash = areq_ctx->tail; | ||
365 | int err; | ||
366 | |||
367 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | ||
368 | crypto_ahash_alignmask(auth) + 1); | ||
369 | |||
370 | ahash_request_set_tfm(ahreq, auth); | ||
371 | |||
372 | err = crypto_ahash_init(ahreq); | ||
373 | if (err) | ||
374 | return ERR_PTR(err); | ||
375 | |||
376 | ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen); | ||
377 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
378 | areq_ctx->update_complete, req); | ||
379 | |||
380 | err = crypto_ahash_update(ahreq); | ||
381 | if (err) | ||
382 | return ERR_PTR(err); | ||
383 | |||
384 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen); | ||
385 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
386 | areq_ctx->update_complete2, req); | ||
387 | |||
388 | err = crypto_ahash_update(ahreq); | ||
389 | if (err) | ||
390 | return ERR_PTR(err); | ||
391 | |||
392 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash, | ||
393 | areq_ctx->trailen); | ||
394 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
395 | areq_ctx->complete, req); | ||
396 | |||
397 | err = crypto_ahash_finup(ahreq); | ||
398 | if (err) | ||
399 | return ERR_PTR(err); | ||
400 | |||
401 | return hash; | ||
402 | } | ||
403 | |||
404 | static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv, | ||
405 | unsigned int flags) | ||
406 | { | ||
407 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
408 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
409 | struct scatterlist *dst = req->dst; | ||
410 | struct scatterlist *assoc = req->assoc; | ||
411 | struct scatterlist *cipher = areq_ctx->cipher; | ||
412 | struct scatterlist *hsg = areq_ctx->hsg; | ||
413 | struct scatterlist *tsg = areq_ctx->tsg; | ||
414 | struct scatterlist *assoc1; | ||
415 | struct scatterlist *assoc2; | ||
416 | unsigned int ivsize = crypto_aead_ivsize(authenc_esn); | ||
417 | unsigned int cryptlen = req->cryptlen; | ||
418 | struct page *dstp; | ||
419 | u8 *vdst; | ||
420 | u8 *hash; | ||
421 | |||
422 | dstp = sg_page(dst); | ||
423 | vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; | ||
424 | |||
425 | if (ivsize) { | ||
426 | sg_init_table(cipher, 2); | ||
427 | sg_set_buf(cipher, iv, ivsize); | ||
428 | scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); | ||
429 | dst = cipher; | ||
430 | cryptlen += ivsize; | ||
431 | } | ||
432 | |||
433 | if (sg_is_last(assoc)) | ||
434 | return -EINVAL; | ||
435 | |||
436 | assoc1 = assoc + 1; | ||
437 | if (sg_is_last(assoc1)) | ||
438 | return -EINVAL; | ||
439 | |||
440 | assoc2 = assoc + 2; | ||
441 | if (!sg_is_last(assoc2)) | ||
442 | return -EINVAL; | ||
443 | |||
444 | sg_init_table(hsg, 2); | ||
445 | sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); | ||
446 | sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); | ||
447 | |||
448 | sg_init_table(tsg, 1); | ||
449 | sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); | ||
450 | |||
451 | areq_ctx->cryptlen = cryptlen; | ||
452 | areq_ctx->headlen = assoc->length + assoc2->length; | ||
453 | areq_ctx->trailen = assoc1->length; | ||
454 | areq_ctx->sg = dst; | ||
455 | |||
456 | areq_ctx->complete = authenc_esn_geniv_ahash_done; | ||
457 | areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done; | ||
458 | areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2; | ||
459 | |||
460 | hash = crypto_authenc_esn_ahash(req, flags); | ||
461 | if (IS_ERR(hash)) | ||
462 | return PTR_ERR(hash); | ||
463 | |||
464 | scatterwalk_map_and_copy(hash, dst, cryptlen, | ||
465 | crypto_aead_authsize(authenc_esn), 1); | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | |||
470 | static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, | ||
471 | int err) | ||
472 | { | ||
473 | struct aead_request *areq = req->data; | ||
474 | |||
475 | if (!err) { | ||
476 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq); | ||
477 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
478 | struct ablkcipher_request *abreq = aead_request_ctx(areq); | ||
479 | u8 *iv = (u8 *)(abreq + 1) + | ||
480 | crypto_ablkcipher_reqsize(ctx->enc); | ||
481 | |||
482 | err = crypto_authenc_esn_genicv(areq, iv, 0); | ||
483 | } | ||
484 | |||
485 | authenc_esn_request_complete(areq, err); | ||
486 | } | ||
487 | |||
488 | static int crypto_authenc_esn_encrypt(struct aead_request *req) | ||
489 | { | ||
490 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
491 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
492 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
493 | struct crypto_ablkcipher *enc = ctx->enc; | ||
494 | struct scatterlist *dst = req->dst; | ||
495 | unsigned int cryptlen = req->cryptlen; | ||
496 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail | ||
497 | + ctx->reqoff); | ||
498 | u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); | ||
499 | int err; | ||
500 | |||
501 | ablkcipher_request_set_tfm(abreq, enc); | ||
502 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
503 | crypto_authenc_esn_encrypt_done, req); | ||
504 | ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); | ||
505 | |||
506 | memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn)); | ||
507 | |||
508 | err = crypto_ablkcipher_encrypt(abreq); | ||
509 | if (err) | ||
510 | return err; | ||
511 | |||
512 | return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
513 | } | ||
514 | |||
515 | static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, | ||
516 | int err) | ||
517 | { | ||
518 | struct aead_request *areq = req->data; | ||
519 | |||
520 | if (!err) { | ||
521 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | ||
522 | |||
523 | err = crypto_authenc_esn_genicv(areq, greq->giv, 0); | ||
524 | } | ||
525 | |||
526 | authenc_esn_request_complete(areq, err); | ||
527 | } | ||
528 | |||
529 | static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req) | ||
530 | { | ||
531 | struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req); | ||
532 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
533 | struct aead_request *areq = &req->areq; | ||
534 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | ||
535 | u8 *iv = req->giv; | ||
536 | int err; | ||
537 | |||
538 | skcipher_givcrypt_set_tfm(greq, ctx->enc); | ||
539 | skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), | ||
540 | crypto_authenc_esn_givencrypt_done, areq); | ||
541 | skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, | ||
542 | areq->iv); | ||
543 | skcipher_givcrypt_set_giv(greq, iv, req->seq); | ||
544 | |||
545 | err = crypto_skcipher_givencrypt(greq); | ||
546 | if (err) | ||
547 | return err; | ||
548 | |||
549 | return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
550 | } | ||
551 | |||
552 | static int crypto_authenc_esn_verify(struct aead_request *req) | ||
553 | { | ||
554 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
555 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
556 | u8 *ohash; | ||
557 | u8 *ihash; | ||
558 | unsigned int authsize; | ||
559 | |||
560 | areq_ctx->complete = authenc_esn_verify_ahash_done; | ||
561 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; | ||
562 | |||
563 | ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
564 | if (IS_ERR(ohash)) | ||
565 | return PTR_ERR(ohash); | ||
566 | |||
567 | authsize = crypto_aead_authsize(authenc_esn); | ||
568 | ihash = ohash + authsize; | ||
569 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
570 | authsize, 0); | ||
571 | return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; | ||
572 | } | ||
573 | |||
574 | static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, | ||
575 | unsigned int cryptlen) | ||
576 | { | ||
577 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
578 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
579 | struct scatterlist *src = req->src; | ||
580 | struct scatterlist *assoc = req->assoc; | ||
581 | struct scatterlist *cipher = areq_ctx->cipher; | ||
582 | struct scatterlist *hsg = areq_ctx->hsg; | ||
583 | struct scatterlist *tsg = areq_ctx->tsg; | ||
584 | struct scatterlist *assoc1; | ||
585 | struct scatterlist *assoc2; | ||
586 | unsigned int ivsize = crypto_aead_ivsize(authenc_esn); | ||
587 | struct page *srcp; | ||
588 | u8 *vsrc; | ||
589 | |||
590 | srcp = sg_page(src); | ||
591 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; | ||
592 | |||
593 | if (ivsize) { | ||
594 | sg_init_table(cipher, 2); | ||
595 | sg_set_buf(cipher, iv, ivsize); | ||
596 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); | ||
597 | src = cipher; | ||
598 | cryptlen += ivsize; | ||
599 | } | ||
600 | |||
601 | if (sg_is_last(assoc)) | ||
602 | return -EINVAL; | ||
603 | |||
604 | assoc1 = assoc + 1; | ||
605 | if (sg_is_last(assoc1)) | ||
606 | return -EINVAL; | ||
607 | |||
608 | assoc2 = assoc + 2; | ||
609 | if (!sg_is_last(assoc2)) | ||
610 | return -EINVAL; | ||
611 | |||
612 | sg_init_table(hsg, 2); | ||
613 | sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); | ||
614 | sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); | ||
615 | |||
616 | sg_init_table(tsg, 1); | ||
617 | sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); | ||
618 | |||
619 | areq_ctx->cryptlen = cryptlen; | ||
620 | areq_ctx->headlen = assoc->length + assoc2->length; | ||
621 | areq_ctx->trailen = assoc1->length; | ||
622 | areq_ctx->sg = src; | ||
623 | |||
624 | areq_ctx->complete = authenc_esn_verify_ahash_done; | ||
625 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; | ||
626 | areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2; | ||
627 | |||
628 | return crypto_authenc_esn_verify(req); | ||
629 | } | ||
630 | |||
631 | static int crypto_authenc_esn_decrypt(struct aead_request *req) | ||
632 | { | ||
633 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
634 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
635 | struct ablkcipher_request *abreq = aead_request_ctx(req); | ||
636 | unsigned int cryptlen = req->cryptlen; | ||
637 | unsigned int authsize = crypto_aead_authsize(authenc_esn); | ||
638 | u8 *iv = req->iv; | ||
639 | int err; | ||
640 | |||
641 | if (cryptlen < authsize) | ||
642 | return -EINVAL; | ||
643 | cryptlen -= authsize; | ||
644 | |||
645 | err = crypto_authenc_esn_iverify(req, iv, cryptlen); | ||
646 | if (err) | ||
647 | return err; | ||
648 | |||
649 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
650 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
651 | req->base.complete, req->base.data); | ||
652 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); | ||
653 | |||
654 | return crypto_ablkcipher_decrypt(abreq); | ||
655 | } | ||
656 | |||
657 | static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | ||
658 | { | ||
659 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
660 | struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
661 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | ||
662 | struct crypto_ahash *auth; | ||
663 | struct crypto_ablkcipher *enc; | ||
664 | int err; | ||
665 | |||
666 | auth = crypto_spawn_ahash(&ictx->auth); | ||
667 | if (IS_ERR(auth)) | ||
668 | return PTR_ERR(auth); | ||
669 | |||
670 | enc = crypto_spawn_skcipher(&ictx->enc); | ||
671 | err = PTR_ERR(enc); | ||
672 | if (IS_ERR(enc)) | ||
673 | goto err_free_ahash; | ||
674 | |||
675 | ctx->auth = auth; | ||
676 | ctx->enc = enc; | ||
677 | |||
678 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | ||
679 | crypto_ahash_alignmask(auth), | ||
680 | crypto_ahash_alignmask(auth) + 1) + | ||
681 | crypto_ablkcipher_ivsize(enc); | ||
682 | |||
683 | tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + | ||
684 | ctx->reqoff + | ||
685 | max_t(unsigned int, | ||
686 | crypto_ahash_reqsize(auth) + | ||
687 | sizeof(struct ahash_request), | ||
688 | sizeof(struct skcipher_givcrypt_request) + | ||
689 | crypto_ablkcipher_reqsize(enc)); | ||
690 | |||
691 | return 0; | ||
692 | |||
693 | err_free_ahash: | ||
694 | crypto_free_ahash(auth); | ||
695 | return err; | ||
696 | } | ||
697 | |||
698 | static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm) | ||
699 | { | ||
700 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | ||
701 | |||
702 | crypto_free_ahash(ctx->auth); | ||
703 | crypto_free_ablkcipher(ctx->enc); | ||
704 | } | ||
705 | |||
706 | static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | ||
707 | { | ||
708 | struct crypto_attr_type *algt; | ||
709 | struct crypto_instance *inst; | ||
710 | struct hash_alg_common *auth; | ||
711 | struct crypto_alg *auth_base; | ||
712 | struct crypto_alg *enc; | ||
713 | struct authenc_esn_instance_ctx *ctx; | ||
714 | const char *enc_name; | ||
715 | int err; | ||
716 | |||
717 | algt = crypto_get_attr_type(tb); | ||
718 | err = PTR_ERR(algt); | ||
719 | if (IS_ERR(algt)) | ||
720 | return ERR_PTR(err); | ||
721 | |||
722 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | ||
723 | return ERR_PTR(-EINVAL); | ||
724 | |||
725 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | ||
726 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
727 | if (IS_ERR(auth)) | ||
728 | return ERR_CAST(auth); | ||
729 | |||
730 | auth_base = &auth->base; | ||
731 | |||
732 | enc_name = crypto_attr_alg_name(tb[2]); | ||
733 | err = PTR_ERR(enc_name); | ||
734 | if (IS_ERR(enc_name)) | ||
735 | goto out_put_auth; | ||
736 | |||
737 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
738 | err = -ENOMEM; | ||
739 | if (!inst) | ||
740 | goto out_put_auth; | ||
741 | |||
742 | ctx = crypto_instance_ctx(inst); | ||
743 | |||
744 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); | ||
745 | if (err) | ||
746 | goto err_free_inst; | ||
747 | |||
748 | crypto_set_skcipher_spawn(&ctx->enc, inst); | ||
749 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, | ||
750 | crypto_requires_sync(algt->type, | ||
751 | algt->mask)); | ||
752 | if (err) | ||
753 | goto err_drop_auth; | ||
754 | |||
755 | enc = crypto_skcipher_spawn_alg(&ctx->enc); | ||
756 | |||
757 | err = -ENAMETOOLONG; | ||
758 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | ||
759 | "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >= | ||
760 | CRYPTO_MAX_ALG_NAME) | ||
761 | goto err_drop_enc; | ||
762 | |||
763 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
764 | "authencesn(%s,%s)", auth_base->cra_driver_name, | ||
765 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
766 | goto err_drop_enc; | ||
767 | |||
768 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | ||
769 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | ||
770 | inst->alg.cra_priority = enc->cra_priority * | ||
771 | 10 + auth_base->cra_priority; | ||
772 | inst->alg.cra_blocksize = enc->cra_blocksize; | ||
773 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; | ||
774 | inst->alg.cra_type = &crypto_aead_type; | ||
775 | |||
776 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | ||
777 | inst->alg.cra_aead.maxauthsize = auth->digestsize; | ||
778 | |||
779 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); | ||
780 | |||
781 | inst->alg.cra_init = crypto_authenc_esn_init_tfm; | ||
782 | inst->alg.cra_exit = crypto_authenc_esn_exit_tfm; | ||
783 | |||
784 | inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey; | ||
785 | inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt; | ||
786 | inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt; | ||
787 | inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt; | ||
788 | |||
789 | out: | ||
790 | crypto_mod_put(auth_base); | ||
791 | return inst; | ||
792 | |||
793 | err_drop_enc: | ||
794 | crypto_drop_skcipher(&ctx->enc); | ||
795 | err_drop_auth: | ||
796 | crypto_drop_ahash(&ctx->auth); | ||
797 | err_free_inst: | ||
798 | kfree(inst); | ||
799 | out_put_auth: | ||
800 | inst = ERR_PTR(err); | ||
801 | goto out; | ||
802 | } | ||
803 | |||
804 | static void crypto_authenc_esn_free(struct crypto_instance *inst) | ||
805 | { | ||
806 | struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
807 | |||
808 | crypto_drop_skcipher(&ctx->enc); | ||
809 | crypto_drop_ahash(&ctx->auth); | ||
810 | kfree(inst); | ||
811 | } | ||
812 | |||
813 | static struct crypto_template crypto_authenc_esn_tmpl = { | ||
814 | .name = "authencesn", | ||
815 | .alloc = crypto_authenc_esn_alloc, | ||
816 | .free = crypto_authenc_esn_free, | ||
817 | .module = THIS_MODULE, | ||
818 | }; | ||
819 | |||
820 | static int __init crypto_authenc_esn_module_init(void) | ||
821 | { | ||
822 | return crypto_register_template(&crypto_authenc_esn_tmpl); | ||
823 | } | ||
824 | |||
825 | static void __exit crypto_authenc_esn_module_exit(void) | ||
826 | { | ||
827 | crypto_unregister_template(&crypto_authenc_esn_tmpl); | ||
828 | } | ||
829 | |||
830 | module_init(crypto_authenc_esn_module_init); | ||
831 | module_exit(crypto_authenc_esn_module_exit); | ||
832 | |||
833 | MODULE_LICENSE("GPL"); | ||
834 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); | ||
835 | MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); | ||
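A consumer instantiates the new template by name, so the usual IPsec pairing would be "authencesn(hmac(sha1),cbc(aes))", keyed with the same RTA-encoded blob that crypto_authenc_esn_setkey() parses above. The in-kernel sketch below is hypothetical: the algorithm names, the 96-bit ICV and the helper itself are illustrative and not part of the patch.

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>

static struct crypto_aead *example_alloc_authencesn(const u8 *authkey,
						    unsigned int authkeylen,
						    const u8 *enckey,
						    unsigned int enckeylen)
{
	struct crypto_authenc_key_param *param;
	struct crypto_aead *tfm;
	struct rtattr *rta;
	unsigned int keylen;
	u8 *key, *p;
	int err;

	tfm = crypto_alloc_aead("authencesn(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* key blob = RTA(enckeylen) || authentication key || encryption key,
	 * exactly the layout crypto_authenc_esn_setkey() expects */
	keylen = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto err_free_tfm;
	}

	rta = (struct rtattr *)key;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	p = key + RTA_SPACE(sizeof(*param));
	memcpy(p, authkey, authkeylen);
	memcpy(p + authkeylen, enckey, enckeylen);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 12);	/* 96-bit ICV */

	kfree(key);
	if (err)
		goto err_free_tfm;

	return tfm;

err_free_tfm:
	crypto_free_aead(tfm);
	return ERR_PTR(err);
}

Note that crypto_authenc_esn_genicv()/iverify() above insist on an associated-data scatterlist of exactly three entries, with the ESN trailer in the middle entry.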
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 90d26c91f4e9..7a7219266e3c 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, | |||
89 | memcpy(walk->dst.virt.addr, walk->page, n); | 89 | memcpy(walk->dst.virt.addr, walk->page, n); |
90 | blkcipher_unmap_dst(walk); | 90 | blkcipher_unmap_dst(walk); |
91 | } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { | 91 | } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { |
92 | blkcipher_unmap_src(walk); | ||
93 | if (walk->flags & BLKCIPHER_WALK_DIFF) | 92 | if (walk->flags & BLKCIPHER_WALK_DIFF) |
94 | blkcipher_unmap_dst(walk); | 93 | blkcipher_unmap_dst(walk); |
94 | blkcipher_unmap_src(walk); | ||
95 | } | 95 | } |
96 | 96 | ||
97 | scatterwalk_advance(&walk->in, n); | 97 | scatterwalk_advance(&walk->in, n); |
diff --git a/crypto/cast5.c b/crypto/cast5.c index a1d2294b50ad..4a230ddec877 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c | |||
@@ -604,36 +604,23 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | |||
604 | * Rounds 3, 6, 9, 12, and 15 use f function Type 3. | 604 | * Rounds 3, 6, 9, 12, and 15 use f function Type 3. |
605 | */ | 605 | */ |
606 | 606 | ||
607 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
608 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
609 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
610 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
611 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
612 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
613 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
614 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
615 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
616 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
617 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
618 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
607 | if (!(c->rr)) { | 619 | if (!(c->rr)) { |
608 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
609 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
610 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
611 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
612 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
613 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
614 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
615 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
616 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
617 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
618 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
619 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
620 | t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); | 620 | t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); |
621 | t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); | 621 | t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); |
622 | t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); | 622 | t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); |
623 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); | 623 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); |
624 | } else { | ||
625 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
626 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
627 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
628 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
629 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
630 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
631 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
632 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
633 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
634 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
635 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
636 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
637 | } | 624 | } |
638 | 625 | ||
639 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and | 626 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and |
@@ -663,32 +650,19 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | |||
663 | t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); | 650 | t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); |
664 | t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); | 651 | t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); |
665 | t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); | 652 | t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); |
666 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
667 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
668 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
669 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
670 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
671 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
672 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
673 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
674 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
675 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
676 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
677 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
678 | } else { | ||
679 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
680 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
681 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
682 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
683 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
684 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
685 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
686 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
687 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
688 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
689 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
690 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
691 | } | 653 | } |
654 | t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); | ||
655 | t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); | ||
656 | t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); | ||
657 | t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); | ||
658 | t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); | ||
659 | t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); | ||
660 | t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); | ||
661 | t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); | ||
662 | t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); | ||
663 | t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); | ||
664 | t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); | ||
665 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | ||
692 | 666 | ||
693 | dst[0] = cpu_to_be32(r); | 667 | dst[0] = cpu_to_be32(r); |
694 | dst[1] = cpu_to_be32(l); | 668 | dst[1] = cpu_to_be32(l); |
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ef71318976c7..e46d21ae26bc 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -3,6 +3,13 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * Added AEAD support to cryptd. | ||
7 | * Authors: Tadeusz Struk (tadeusz.struk@intel.com) | ||
8 | * Adrian Hoban <adrian.hoban@intel.com> | ||
9 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | ||
10 | * Aidan O'Mahony (aidan.o.mahony@intel.com) | ||
11 | * Copyright (c) 2010, Intel Corporation. | ||
12 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 14 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 15 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -12,6 +19,7 @@ | |||
12 | 19 | ||
13 | #include <crypto/algapi.h> | 20 | #include <crypto/algapi.h> |
14 | #include <crypto/internal/hash.h> | 21 | #include <crypto/internal/hash.h> |
22 | #include <crypto/internal/aead.h> | ||
15 | #include <crypto/cryptd.h> | 23 | #include <crypto/cryptd.h> |
16 | #include <crypto/crypto_wq.h> | 24 | #include <crypto/crypto_wq.h> |
17 | #include <linux/err.h> | 25 | #include <linux/err.h> |
@@ -44,6 +52,11 @@ struct hashd_instance_ctx { | |||
44 | struct cryptd_queue *queue; | 52 | struct cryptd_queue *queue; |
45 | }; | 53 | }; |
46 | 54 | ||
55 | struct aead_instance_ctx { | ||
56 | struct crypto_aead_spawn aead_spawn; | ||
57 | struct cryptd_queue *queue; | ||
58 | }; | ||
59 | |||
47 | struct cryptd_blkcipher_ctx { | 60 | struct cryptd_blkcipher_ctx { |
48 | struct crypto_blkcipher *child; | 61 | struct crypto_blkcipher *child; |
49 | }; | 62 | }; |
@@ -61,6 +74,14 @@ struct cryptd_hash_request_ctx { | |||
61 | struct shash_desc desc; | 74 | struct shash_desc desc; |
62 | }; | 75 | }; |
63 | 76 | ||
77 | struct cryptd_aead_ctx { | ||
78 | struct crypto_aead *child; | ||
79 | }; | ||
80 | |||
81 | struct cryptd_aead_request_ctx { | ||
82 | crypto_completion_t complete; | ||
83 | }; | ||
84 | |||
64 | static void cryptd_queue_worker(struct work_struct *work); | 85 | static void cryptd_queue_worker(struct work_struct *work); |
65 | 86 | ||
66 | static int cryptd_init_queue(struct cryptd_queue *queue, | 87 | static int cryptd_init_queue(struct cryptd_queue *queue, |
@@ -601,6 +622,144 @@ out_put_alg: | |||
601 | return err; | 622 | return err; |
602 | } | 623 | } |
603 | 624 | ||
625 | static void cryptd_aead_crypt(struct aead_request *req, | ||
626 | struct crypto_aead *child, | ||
627 | int err, | ||
628 | int (*crypt)(struct aead_request *req)) | ||
629 | { | ||
630 | struct cryptd_aead_request_ctx *rctx; | ||
631 | rctx = aead_request_ctx(req); | ||
632 | |||
633 | if (unlikely(err == -EINPROGRESS)) | ||
634 | goto out; | ||
635 | aead_request_set_tfm(req, child); | ||
636 | err = crypt( req ); | ||
637 | req->base.complete = rctx->complete; | ||
638 | out: | ||
639 | local_bh_disable(); | ||
640 | rctx->complete(&req->base, err); | ||
641 | local_bh_enable(); | ||
642 | } | ||
643 | |||
644 | static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) | ||
645 | { | ||
646 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | ||
647 | struct crypto_aead *child = ctx->child; | ||
648 | struct aead_request *req; | ||
649 | |||
650 | req = container_of(areq, struct aead_request, base); | ||
651 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); | ||
652 | } | ||
653 | |||
654 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | ||
655 | { | ||
656 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | ||
657 | struct crypto_aead *child = ctx->child; | ||
658 | struct aead_request *req; | ||
659 | |||
660 | req = container_of(areq, struct aead_request, base); | ||
661 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); | ||
662 | } | ||
663 | |||
664 | static int cryptd_aead_enqueue(struct aead_request *req, | ||
665 | crypto_completion_t complete) | ||
666 | { | ||
667 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); | ||
668 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
669 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); | ||
670 | |||
671 | rctx->complete = req->base.complete; | ||
672 | req->base.complete = complete; | ||
673 | return cryptd_enqueue_request(queue, &req->base); | ||
674 | } | ||
675 | |||
676 | static int cryptd_aead_encrypt_enqueue(struct aead_request *req) | ||
677 | { | ||
678 | return cryptd_aead_enqueue(req, cryptd_aead_encrypt ); | ||
679 | } | ||
680 | |||
681 | static int cryptd_aead_decrypt_enqueue(struct aead_request *req) | ||
682 | { | ||
683 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt ); | ||
684 | } | ||
685 | |||
686 | static int cryptd_aead_init_tfm(struct crypto_tfm *tfm) | ||
687 | { | ||
688 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
689 | struct aead_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
690 | struct crypto_aead_spawn *spawn = &ictx->aead_spawn; | ||
691 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
692 | struct crypto_aead *cipher; | ||
693 | |||
694 | cipher = crypto_spawn_aead(spawn); | ||
695 | if (IS_ERR(cipher)) | ||
696 | return PTR_ERR(cipher); | ||
697 | |||
698 | crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
699 | ctx->child = cipher; | ||
700 | tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx); | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm) | ||
705 | { | ||
706 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
707 | crypto_free_aead(ctx->child); | ||
708 | } | ||
709 | |||
710 | static int cryptd_create_aead(struct crypto_template *tmpl, | ||
711 | struct rtattr **tb, | ||
712 | struct cryptd_queue *queue) | ||
713 | { | ||
714 | struct aead_instance_ctx *ctx; | ||
715 | struct crypto_instance *inst; | ||
716 | struct crypto_alg *alg; | ||
717 | int err; | ||
718 | |||
719 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD, | ||
720 | CRYPTO_ALG_TYPE_MASK); | ||
721 | if (IS_ERR(alg)) | ||
722 | return PTR_ERR(alg); | ||
723 | |||
724 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); | ||
725 | err = PTR_ERR(inst); | ||
726 | if (IS_ERR(inst)) | ||
727 | goto out_put_alg; | ||
728 | |||
729 | ctx = crypto_instance_ctx(inst); | ||
730 | ctx->queue = queue; | ||
731 | |||
732 | err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst, | ||
733 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
734 | if (err) | ||
735 | goto out_free_inst; | ||
736 | |||
737 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
738 | inst->alg.cra_type = alg->cra_type; | ||
739 | inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx); | ||
740 | inst->alg.cra_init = cryptd_aead_init_tfm; | ||
741 | inst->alg.cra_exit = cryptd_aead_exit_tfm; | ||
742 | inst->alg.cra_aead.setkey = alg->cra_aead.setkey; | ||
743 | inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; | ||
744 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | ||
745 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | ||
746 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | ||
747 | inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue; | ||
748 | inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue; | ||
749 | inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt; | ||
750 | inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt; | ||
751 | |||
752 | err = crypto_register_instance(tmpl, inst); | ||
753 | if (err) { | ||
754 | crypto_drop_spawn(&ctx->aead_spawn.base); | ||
755 | out_free_inst: | ||
756 | kfree(inst); | ||
757 | } | ||
758 | out_put_alg: | ||
759 | crypto_mod_put(alg); | ||
760 | return err; | ||
761 | } | ||
762 | |||
604 | static struct cryptd_queue queue; | 763 | static struct cryptd_queue queue; |
605 | 764 | ||
606 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | 765 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
@@ -616,6 +775,8 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
616 | return cryptd_create_blkcipher(tmpl, tb, &queue); | 775 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
617 | case CRYPTO_ALG_TYPE_DIGEST: | 776 | case CRYPTO_ALG_TYPE_DIGEST: |
618 | return cryptd_create_hash(tmpl, tb, &queue); | 777 | return cryptd_create_hash(tmpl, tb, &queue); |
778 | case CRYPTO_ALG_TYPE_AEAD: | ||
779 | return cryptd_create_aead(tmpl, tb, &queue); | ||
619 | } | 780 | } |
620 | 781 | ||
621 | return -EINVAL; | 782 | return -EINVAL; |
@@ -625,16 +786,21 @@ static void cryptd_free(struct crypto_instance *inst) | |||
625 | { | 786 | { |
626 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 787 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
627 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | 788 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); |
789 | struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst); | ||
628 | 790 | ||
629 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | 791 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { |
630 | case CRYPTO_ALG_TYPE_AHASH: | 792 | case CRYPTO_ALG_TYPE_AHASH: |
631 | crypto_drop_shash(&hctx->spawn); | 793 | crypto_drop_shash(&hctx->spawn); |
632 | kfree(ahash_instance(inst)); | 794 | kfree(ahash_instance(inst)); |
633 | return; | 795 | return; |
796 | case CRYPTO_ALG_TYPE_AEAD: | ||
797 | crypto_drop_spawn(&aead_ctx->aead_spawn.base); | ||
798 | kfree(inst); | ||
799 | return; | ||
800 | default: | ||
801 | crypto_drop_spawn(&ctx->spawn); | ||
802 | kfree(inst); | ||
634 | } | 803 | } |
635 | |||
636 | crypto_drop_spawn(&ctx->spawn); | ||
637 | kfree(inst); | ||
638 | } | 804 | } |
639 | 805 | ||
640 | static struct crypto_template cryptd_tmpl = { | 806 | static struct crypto_template cryptd_tmpl = { |
@@ -724,6 +890,40 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm) | |||
724 | } | 890 | } |
725 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); | 891 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); |
726 | 892 | ||
893 | struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, | ||
894 | u32 type, u32 mask) | ||
895 | { | ||
896 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
897 | struct crypto_aead *tfm; | ||
898 | |||
899 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
900 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
901 | return ERR_PTR(-EINVAL); | ||
902 | tfm = crypto_alloc_aead(cryptd_alg_name, type, mask); | ||
903 | if (IS_ERR(tfm)) | ||
904 | return ERR_CAST(tfm); | ||
905 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
906 | crypto_free_aead(tfm); | ||
907 | return ERR_PTR(-EINVAL); | ||
908 | } | ||
909 | return __cryptd_aead_cast(tfm); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(cryptd_alloc_aead); | ||
912 | |||
913 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) | ||
914 | { | ||
915 | struct cryptd_aead_ctx *ctx; | ||
916 | ctx = crypto_aead_ctx(&tfm->base); | ||
917 | return ctx->child; | ||
918 | } | ||
919 | EXPORT_SYMBOL_GPL(cryptd_aead_child); | ||
920 | |||
921 | void cryptd_free_aead(struct cryptd_aead *tfm) | ||
922 | { | ||
923 | crypto_free_aead(&tfm->base); | ||
924 | } | ||
925 | EXPORT_SYMBOL_GPL(cryptd_free_aead); | ||
926 | |||
727 | static int __init cryptd_init(void) | 927 | static int __init cryptd_init(void) |
728 | { | 928 | { |
729 | int err; | 929 | int err; |
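The new cryptd_alloc_aead()/cryptd_aead_child()/cryptd_free_aead() exports above let a driver defer AEAD processing to the cryptd workqueue. Below is a minimal usage sketch, not part of the patch: the inner algorithm name and the example_* identifiers are purely illustrative, and error handling is trimmed.

#include <crypto/aead.h>
#include <crypto/cryptd.h>
#include <linux/err.h>

static struct cryptd_aead *example_aead;	/* hypothetical driver state */

static int example_aead_setup(const u8 *key, unsigned int keylen)
{
	/* Wraps the named inner AEAD as "cryptd(...)" so encrypt/decrypt
	 * requests are queued and run from the cryptd workqueue. */
	example_aead = cryptd_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(example_aead))
		return PTR_ERR(example_aead);

	/* The child transform is the underlying AEAD; keys may be
	 * programmed on it directly. */
	return crypto_aead_setkey(cryptd_aead_child(example_aead), key, keylen);
}

static void example_aead_teardown(void)
{
	cryptd_free_aead(example_aead);
}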
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c index fdcf6248f152..b980ee1af459 100644 --- a/crypto/crypto_wq.c +++ b/crypto/crypto_wq.c | |||
@@ -20,7 +20,8 @@ EXPORT_SYMBOL_GPL(kcrypto_wq); | |||
20 | 20 | ||
21 | static int __init crypto_wq_init(void) | 21 | static int __init crypto_wq_init(void) |
22 | { | 22 | { |
23 | kcrypto_wq = create_workqueue("crypto"); | 23 | kcrypto_wq = alloc_workqueue("crypto", |
24 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); | ||
24 | if (unlikely(!kcrypto_wq)) | 25 | if (unlikely(!kcrypto_wq)) |
25 | return -ENOMEM; | 26 | return -ENOMEM; |
26 | return 0; | 27 | return 0; |
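For context, a generic sketch of the pattern this conversion follows (not code from the patch): alloc_workqueue() makes the queue's properties explicit — WQ_MEM_RECLAIM provides a rescuer thread so crypto work keeps making progress under memory pressure, WQ_CPU_INTENSIVE keeps the CPU-bound jobs from holding up other per-cpu work items, and max_active of 1 preserves the old one-work-item-at-a-time-per-CPU behaviour of create_workqueue().

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical queue */

static int __init example_wq_init(void)
{
	example_wq = alloc_workqueue("example",
				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_wq_exit(void)
{
	destroy_workqueue(example_wq);
}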
diff --git a/crypto/deflate.c b/crypto/deflate.c index 463dc859aa05..b0165ecad0c5 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
34 | #include <linux/net.h> | 34 | #include <linux/net.h> |
35 | #include <linux/slab.h> | ||
36 | 35 | ||
37 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION | 36 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION |
38 | #define DEFLATE_DEF_WINBITS 11 | 37 | #define DEFLATE_DEF_WINBITS 11 |
@@ -48,12 +47,12 @@ static int deflate_comp_init(struct deflate_ctx *ctx) | |||
48 | int ret = 0; | 47 | int ret = 0; |
49 | struct z_stream_s *stream = &ctx->comp_stream; | 48 | struct z_stream_s *stream = &ctx->comp_stream; |
50 | 49 | ||
51 | stream->workspace = vmalloc(zlib_deflate_workspacesize()); | 50 | stream->workspace = vzalloc(zlib_deflate_workspacesize( |
51 | -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL)); | ||
52 | if (!stream->workspace) { | 52 | if (!stream->workspace) { |
53 | ret = -ENOMEM; | 53 | ret = -ENOMEM; |
54 | goto out; | 54 | goto out; |
55 | } | 55 | } |
56 | memset(stream->workspace, 0, zlib_deflate_workspacesize()); | ||
57 | ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, | 56 | ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, |
58 | -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, | 57 | -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, |
59 | Z_DEFAULT_STRATEGY); | 58 | Z_DEFAULT_STRATEGY); |
@@ -73,7 +72,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx) | |||
73 | int ret = 0; | 72 | int ret = 0; |
74 | struct z_stream_s *stream = &ctx->decomp_stream; | 73 | struct z_stream_s *stream = &ctx->decomp_stream; |
75 | 74 | ||
76 | stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); | 75 | stream->workspace = vzalloc(zlib_inflate_workspacesize()); |
77 | if (!stream->workspace) { | 76 | if (!stream->workspace) { |
78 | ret = -ENOMEM; | 77 | ret = -ENOMEM; |
79 | goto out; | 78 | goto out; |
@@ -86,7 +85,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx) | |||
86 | out: | 85 | out: |
87 | return ret; | 86 | return ret; |
88 | out_free: | 87 | out_free: |
89 | kfree(stream->workspace); | 88 | vfree(stream->workspace); |
90 | goto out; | 89 | goto out; |
91 | } | 90 | } |
92 | 91 | ||
@@ -99,7 +98,7 @@ static void deflate_comp_exit(struct deflate_ctx *ctx) | |||
99 | static void deflate_decomp_exit(struct deflate_ctx *ctx) | 98 | static void deflate_decomp_exit(struct deflate_ctx *ctx) |
100 | { | 99 | { |
101 | zlib_inflateEnd(&ctx->decomp_stream); | 100 | zlib_inflateEnd(&ctx->decomp_stream); |
102 | kfree(ctx->decomp_stream.workspace); | 101 | vfree(ctx->decomp_stream.workspace); |
103 | } | 102 | } |
104 | 103 | ||
105 | static int deflate_init(struct crypto_tfm *tfm) | 104 | static int deflate_init(struct crypto_tfm *tfm) |
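The allocation changes in this file follow one pattern, restated in the hedged sketch below (standalone, not from the patch; it assumes deflate.c's DEFLATE_DEF_* defines are in scope): zlib_deflate_workspacesize() now takes the window bits and memory level so the workspace is sized for the parameters actually passed to zlib_deflateInit2(), vzalloc() replaces both the vmalloc()+memset() pair and the inflate side's kzalloc() (whose workspace can be too large to ask of kmalloc), and the matching free becomes vfree().

#include <linux/vmalloc.h>
#include <linux/zlib.h>

static void *example_alloc_deflate_ws(void)
{
	/* Zeroed, virtually contiguous workspace sized for the same
	 * window bits / memory level that deflateInit2() will be given. */
	return vzalloc(zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
						  DEFLATE_DEF_MEMLEVEL));
}

static void example_free_ws(void *ws)
{
	vfree(ws);	/* vzalloc()ed memory must be vfree()d, not kfree()d */
}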
diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 249f903cc453..873818d48e86 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c | |||
@@ -614,7 +614,7 @@ static const u32 S8[64] = { | |||
614 | #define T3(x) pt[2 * (x) + 2] | 614 | #define T3(x) pt[2 * (x) + 2] |
615 | #define T4(x) pt[2 * (x) + 3] | 615 | #define T4(x) pt[2 * (x) + 3] |
616 | 616 | ||
617 | #define PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) | 617 | #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) |
618 | 618 | ||
619 | /* | 619 | /* |
620 | * Encryption key expansion | 620 | * Encryption key expansion |
@@ -639,22 +639,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k) | |||
639 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; | 639 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; |
640 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; | 640 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; |
641 | 641 | ||
642 | pe[15 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; | 642 | pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; |
643 | pe[14 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 643 | pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
644 | pe[13 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 644 | pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
645 | pe[12 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 645 | pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
646 | pe[11 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 646 | pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
647 | pe[10 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 647 | pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
648 | pe[ 9 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 648 | pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
649 | pe[ 8 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; | 649 | pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; |
650 | pe[ 7 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 650 | pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
651 | pe[ 6 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 651 | pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
652 | pe[ 5 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 652 | pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
653 | pe[ 4 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 653 | pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
654 | pe[ 3 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 654 | pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
655 | pe[ 2 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 655 | pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
656 | pe[ 1 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; | 656 | pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; |
657 | pe[ 0 * 2 + 0] = PC2(b, c, d, a); | 657 | pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); |
658 | 658 | ||
659 | /* Check if first half is weak */ | 659 | /* Check if first half is weak */ |
660 | w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); | 660 | w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); |
@@ -670,22 +670,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k) | |||
670 | /* Check if second half is weak */ | 670 | /* Check if second half is weak */ |
671 | w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); | 671 | w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); |
672 | 672 | ||
673 | pe[15 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; | 673 | pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; |
674 | pe[14 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 674 | pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
675 | pe[13 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 675 | pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
676 | pe[12 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 676 | pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
677 | pe[11 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 677 | pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
678 | pe[10 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 678 | pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
679 | pe[ 9 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 679 | pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
680 | pe[ 8 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; | 680 | pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; |
681 | pe[ 7 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 681 | pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
682 | pe[ 6 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 682 | pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
683 | pe[ 5 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 683 | pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
684 | pe[ 4 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 684 | pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
685 | pe[ 3 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 685 | pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
686 | pe[ 2 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 686 | pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
687 | pe[ 1 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; | 687 | pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; |
688 | pe[ 0 * 2 + 1] = PC2(b, c, d, a); | 688 | pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); |
689 | 689 | ||
690 | /* Fixup: 2413 5768 -> 1357 2468 */ | 690 | /* Fixup: 2413 5768 -> 1357 2468 */ |
691 | for (d = 0; d < 16; ++d) { | 691 | for (d = 0; d < 16; ++d) { |
@@ -722,22 +722,22 @@ static void dkey(u32 *pe, const u8 *k) | |||
722 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; | 722 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; |
723 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; | 723 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; |
724 | 724 | ||
725 | pe[ 0 * 2] = PC2(a, b, c, d); d = rs[d]; | 725 | pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; |
726 | pe[ 1 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 726 | pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
727 | pe[ 2 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 727 | pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
728 | pe[ 3 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 728 | pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
729 | pe[ 4 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 729 | pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
730 | pe[ 5 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 730 | pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
731 | pe[ 6 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 731 | pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
732 | pe[ 7 * 2] = PC2(d, a, b, c); c = rs[c]; | 732 | pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; |
733 | pe[ 8 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 733 | pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
734 | pe[ 9 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 734 | pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
735 | pe[10 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 735 | pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
736 | pe[11 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 736 | pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
737 | pe[12 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 737 | pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
738 | pe[13 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 738 | pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
739 | pe[14 * 2] = PC2(c, d, a, b); b = rs[b]; | 739 | pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; |
740 | pe[15 * 2] = PC2(b, c, d, a); | 740 | pe[15 * 2] = DES_PC2(b, c, d, a); |
741 | 741 | ||
742 | /* Skip to next table set */ | 742 | /* Skip to next table set */ |
743 | pt += 512; | 743 | pt += 512; |
@@ -747,22 +747,22 @@ static void dkey(u32 *pe, const u8 *k) | |||
747 | b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; | 747 | b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; |
748 | a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; | 748 | a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; |
749 | 749 | ||
750 | pe[ 0 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; | 750 | pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; |
751 | pe[ 1 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 751 | pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
752 | pe[ 2 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 752 | pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
753 | pe[ 3 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 753 | pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
754 | pe[ 4 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 754 | pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
755 | pe[ 5 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 755 | pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
756 | pe[ 6 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 756 | pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
757 | pe[ 7 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; | 757 | pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; |
758 | pe[ 8 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 758 | pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
759 | pe[ 9 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 759 | pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
760 | pe[10 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 760 | pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
761 | pe[11 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 761 | pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
762 | pe[12 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 762 | pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
763 | pe[13 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 763 | pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
764 | pe[14 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; | 764 | pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; |
765 | pe[15 * 2 + 1] = PC2(b, c, d, a); | 765 | pe[15 * 2 + 1] = DES_PC2(b, c, d, a); |
766 | 766 | ||
767 | /* Fixup: 2413 5768 -> 1357 2468 */ | 767 | /* Fixup: 2413 5768 -> 1357 2468 */ |
768 | for (d = 0; d < 16; ++d) { | 768 | for (d = 0; d < 16; ++d) { |
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index 3ca3b669d5d5..42ce9f570aec 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c | |||
@@ -62,20 +62,6 @@ out: | |||
62 | skcipher_givcrypt_complete(req, err); | 62 | skcipher_givcrypt_complete(req, err); |
63 | } | 63 | } |
64 | 64 | ||
65 | static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg, | ||
66 | int chain) | ||
67 | { | ||
68 | if (chain) { | ||
69 | head->length += sg->length; | ||
70 | sg = scatterwalk_sg_next(sg); | ||
71 | } | ||
72 | |||
73 | if (sg) | ||
74 | scatterwalk_sg_chain(head, 2, sg); | ||
75 | else | ||
76 | sg_mark_end(head); | ||
77 | } | ||
78 | |||
79 | static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) | 65 | static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) |
80 | { | 66 | { |
81 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 67 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
@@ -124,13 +110,13 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
124 | 110 | ||
125 | sg_init_table(reqctx->src, 2); | 111 | sg_init_table(reqctx->src, 2); |
126 | sg_set_buf(reqctx->src, giv, ivsize); | 112 | sg_set_buf(reqctx->src, giv, ivsize); |
127 | eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize); | 113 | scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2); |
128 | 114 | ||
129 | dst = reqctx->src; | 115 | dst = reqctx->src; |
130 | if (osrc != odst) { | 116 | if (osrc != odst) { |
131 | sg_init_table(reqctx->dst, 2); | 117 | sg_init_table(reqctx->dst, 2); |
132 | sg_set_buf(reqctx->dst, giv, ivsize); | 118 | sg_set_buf(reqctx->dst, giv, ivsize); |
133 | eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize); | 119 | scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2); |
134 | 120 | ||
135 | dst = reqctx->dst; | 121 | dst = reqctx->dst; |
136 | } | 122 | } |
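The removed eseqiv_chain() (and the identical crypto_rfc4543_chain() dropped from gcm.c below) are consolidated into a shared scatterwalk_crypto_chain() helper. Judging from the deleted copies and the new call sites, the shared version carries the same logic with the scatterlist array size passed as the final argument; the following is a reconstruction, not a quote of the real header.

#include <crypto/scatterwalk.h>

/* Reconstruction of the consolidated helper; the real definition lives
 * in <crypto/scatterwalk.h>. */
static inline void example_crypto_chain(struct scatterlist *head,
					struct scatterlist *sg,
					int chain, int num)
{
	if (chain) {
		head->length += sg->length;	/* fold the adjacent entry in */
		sg = scatterwalk_sg_next(sg);
	}

	if (sg)
		scatterwalk_sg_chain(head, num, sg);	/* link the tail */
	else
		sg_mark_end(head);
}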
diff --git a/crypto/gcm.c b/crypto/gcm.c index 2f5fbba6576c..1a252639ef91 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -1102,21 +1102,6 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, | |||
1102 | return crypto_aead_setauthsize(ctx->child, authsize); | 1102 | return crypto_aead_setauthsize(ctx->child, authsize); |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | /* this is the same as crypto_authenc_chain */ | ||
1106 | static void crypto_rfc4543_chain(struct scatterlist *head, | ||
1107 | struct scatterlist *sg, int chain) | ||
1108 | { | ||
1109 | if (chain) { | ||
1110 | head->length += sg->length; | ||
1111 | sg = scatterwalk_sg_next(sg); | ||
1112 | } | ||
1113 | |||
1114 | if (sg) | ||
1115 | scatterwalk_sg_chain(head, 2, sg); | ||
1116 | else | ||
1117 | sg_mark_end(head); | ||
1118 | } | ||
1119 | |||
1120 | static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | 1105 | static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, |
1121 | int enc) | 1106 | int enc) |
1122 | { | 1107 | { |
@@ -1154,13 +1139,13 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | |||
1154 | 1139 | ||
1155 | sg_init_table(payload, 2); | 1140 | sg_init_table(payload, 2); |
1156 | sg_set_buf(payload, req->iv, 8); | 1141 | sg_set_buf(payload, req->iv, 8); |
1157 | crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8); | 1142 | scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); |
1158 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); | 1143 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); |
1159 | 1144 | ||
1160 | sg_init_table(assoc, 2); | 1145 | sg_init_table(assoc, 2); |
1161 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, | 1146 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, |
1162 | req->assoc->offset); | 1147 | req->assoc->offset); |
1163 | crypto_rfc4543_chain(assoc, payload, 0); | 1148 | scatterwalk_crypto_chain(assoc, payload, 0, 2); |
1164 | 1149 | ||
1165 | aead_request_set_tfm(subreq, ctx->child); | 1150 | aead_request_set_tfm(subreq, ctx->child); |
1166 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, | 1151 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, |
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index a90d260528d4..df35e4ccd07e 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c | |||
@@ -89,7 +89,7 @@ | |||
89 | } | 89 | } |
90 | 90 | ||
91 | /* Given the value i in 0..255 as the byte overflow when a field element | 91 | /* Given the value i in 0..255 as the byte overflow when a field element |
92 | in GHASH is multipled by x^8, this function will return the values that | 92 | in GHASH is multiplied by x^8, this function will return the values that |
93 | are generated in the lo 16-bit word of the field value by applying the | 93 | are generated in the lo 16-bit word of the field value by applying the |
94 | modular polynomial. The values lo_byte and hi_byte are returned via the | 94 | modular polynomial. The values lo_byte and hi_byte are returned via the |
95 | macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into | 95 | macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into |
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index de3078215fe6..29a89dad68b6 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -455,7 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, | |||
455 | 455 | ||
456 | get_online_cpus(); | 456 | get_online_cpus(); |
457 | 457 | ||
458 | pcrypt->wq = create_workqueue(name); | 458 | pcrypt->wq = alloc_workqueue(name, |
459 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); | ||
459 | if (!pcrypt->wq) | 460 | if (!pcrypt->wq) |
460 | goto err; | 461 | goto err; |
461 | 462 | ||
@@ -504,7 +505,6 @@ err: | |||
504 | 505 | ||
505 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | 506 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) |
506 | { | 507 | { |
507 | kobject_put(&pcrypt->pinst->kobj); | ||
508 | free_cpumask_var(pcrypt->cb_cpumask->mask); | 508 | free_cpumask_var(pcrypt->cb_cpumask->mask); |
509 | kfree(pcrypt->cb_cpumask); | 509 | kfree(pcrypt->cb_cpumask); |
510 | 510 | ||
diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 1ceb6735aa53..8a0f68b7f257 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC | 6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC |
7 | * | 7 | * |
8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch> | 8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -325,4 +325,5 @@ module_init(rmd128_mod_init); | |||
325 | module_exit(rmd128_mod_fini); | 325 | module_exit(rmd128_mod_fini); |
326 | 326 | ||
327 | MODULE_LICENSE("GPL"); | 327 | MODULE_LICENSE("GPL"); |
328 | MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); | ||
328 | MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); | 329 | MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); |
diff --git a/crypto/rmd160.c b/crypto/rmd160.c index 472261fc913f..525d7bb752cf 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC | 6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC |
7 | * | 7 | * |
8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch> | 8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -369,4 +369,5 @@ module_init(rmd160_mod_init); | |||
369 | module_exit(rmd160_mod_fini); | 369 | module_exit(rmd160_mod_fini); |
370 | 370 | ||
371 | MODULE_LICENSE("GPL"); | 371 | MODULE_LICENSE("GPL"); |
372 | MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); | ||
372 | MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); | 373 | MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); |
diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 72eafa8d2e7b..69293d9b56e0 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC | 6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC |
7 | * | 7 | * |
8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch> | 8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -344,4 +344,5 @@ module_init(rmd256_mod_init); | |||
344 | module_exit(rmd256_mod_fini); | 344 | module_exit(rmd256_mod_fini); |
345 | 345 | ||
346 | MODULE_LICENSE("GPL"); | 346 | MODULE_LICENSE("GPL"); |
347 | MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); | ||
347 | MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); | 348 | MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); |
diff --git a/crypto/rmd320.c b/crypto/rmd320.c index 86becaba2f05..09f97dfdfbba 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC | 6 | * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC |
7 | * | 7 | * |
8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch> | 8 | * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -393,4 +393,5 @@ module_init(rmd320_mod_init); | |||
393 | module_exit(rmd320_mod_fini); | 393 | module_exit(rmd320_mod_fini); |
394 | 394 | ||
395 | MODULE_LICENSE("GPL"); | 395 | MODULE_LICENSE("GPL"); |
396 | MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); | ||
396 | MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); | 397 | MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); |
diff --git a/crypto/shash.c b/crypto/shash.c index 22fd9433141f..76f74b963151 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -310,7 +310,13 @@ static int shash_async_export(struct ahash_request *req, void *out) | |||
310 | 310 | ||
311 | static int shash_async_import(struct ahash_request *req, const void *in) | 311 | static int shash_async_import(struct ahash_request *req, const void *in) |
312 | { | 312 | { |
313 | return crypto_shash_import(ahash_request_ctx(req), in); | 313 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
314 | struct shash_desc *desc = ahash_request_ctx(req); | ||
315 | |||
316 | desc->tfm = *ctx; | ||
317 | desc->flags = req->base.flags; | ||
318 | |||
319 | return crypto_shash_import(desc, in); | ||
314 | } | 320 | } |
315 | 321 | ||
316 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | 322 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) |
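The shash_async_import() fix matters on the path where a synchronous shash algorithm is exposed through the ahash API: the request that imports a saved state is typically not the one that exported it, so its shash_desc must be pointed at the tfm and given the request flags before crypto_shash_import() runs. A hedged sketch of that round trip follows (illustrative only; "sha1", the state buffer size, and the missing error handling are simplifications).

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_partial_hash(const u8 *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req1, *req2;
	struct scatterlist sg;
	u8 state[512];		/* generously sized for the exported state */
	int err;

	/* Masking out CRYPTO_ALG_ASYNC selects the shash-backed wrapper. */
	tfm = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req1 = ahash_request_alloc(tfm, GFP_KERNEL);
	req2 = ahash_request_alloc(tfm, GFP_KERNEL);
	ahash_request_set_callback(req1, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_callback(req2, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	/* Hash part of the data, then save the partial state. */
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req1, &sg, NULL, len);
	err = crypto_ahash_init(req1) ?:
	      crypto_ahash_update(req1) ?:
	      crypto_ahash_export(req1, state);

	/* Resume on a different request: this is where shash_async_import()
	 * must rebuild the descriptor before importing the state. */
	ahash_request_set_crypt(req2, NULL, digest, 0);
	err = err ?: crypto_ahash_import(req2, state) ?:
		     crypto_ahash_final(req2);

	ahash_request_free(req2);
	ahash_request_free(req1);
	crypto_free_ahash(tfm);
	return err;
}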
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 3ca68f9fc14d..2222617b3bed 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -8,6 +8,13 @@ | |||
8 | * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org> | 8 | * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org> |
9 | * Copyright (c) 2007 Nokia Siemens Networks | 9 | * Copyright (c) 2007 Nokia Siemens Networks |
10 | * | 10 | * |
11 | * Updated RFC4106 AES-GCM testing. | ||
12 | * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) | ||
13 | * Adrian Hoban <adrian.hoban@intel.com> | ||
14 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | ||
15 | * Tadeusz Struk (tadeusz.struk@intel.com) | ||
16 | * Copyright (c) 2010, Intel Corporation. | ||
17 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | 18 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 19 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation; either version 2 of the License, or (at your option) | 20 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -139,7 +146,8 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec, | |||
139 | unsigned int tcount, u8 *keysize) | 146 | unsigned int tcount, u8 *keysize) |
140 | { | 147 | { |
141 | unsigned int ret, i, j, iv_len; | 148 | unsigned int ret, i, j, iv_len; |
142 | const char *key, iv[128]; | 149 | const char *key; |
150 | char iv[128]; | ||
143 | struct crypto_blkcipher *tfm; | 151 | struct crypto_blkcipher *tfm; |
144 | struct blkcipher_desc desc; | 152 | struct blkcipher_desc desc; |
145 | const char *e; | 153 | const char *e; |
@@ -980,6 +988,10 @@ static int do_test(int m) | |||
980 | ret += tcrypt_test("ansi_cprng"); | 988 | ret += tcrypt_test("ansi_cprng"); |
981 | break; | 989 | break; |
982 | 990 | ||
991 | case 151: | ||
992 | ret += tcrypt_test("rfc4106(gcm(aes))"); | ||
993 | break; | ||
994 | |||
983 | case 200: | 995 | case 200: |
984 | test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, | 996 | test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, |
985 | speed_template_16_24_32); | 997 | speed_template_16_24_32); |
@@ -997,6 +1009,10 @@ static int do_test(int m) | |||
997 | speed_template_32_48_64); | 1009 | speed_template_32_48_64); |
998 | test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, | 1010 | test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, |
999 | speed_template_32_48_64); | 1011 | speed_template_32_48_64); |
1012 | test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, | ||
1013 | speed_template_16_24_32); | ||
1014 | test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, | ||
1015 | speed_template_16_24_32); | ||
1000 | break; | 1016 | break; |
1001 | 1017 | ||
1002 | case 201: | 1018 | case 201: |
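Aside from the new test modes, the hunk above also untangles a declaration bug: in "const char *key, iv[128];" the qualifier binds to the base type, so iv was an array of const char and memset()ing it discarded the qualifier. A standalone illustration, not from the patch:

	const char *key, iv[128];	/* key: pointer to const; iv: const char[128] */
	/* memset(iv, 0xff, sizeof(iv));   discards const -- the old warning        */

	const char *key2;		/* split form: the key stays read-only...     */
	char iv2[128];			/* ...while iv becomes a writable buffer      */

Once built, the new cases can presumably be exercised through tcrypt's existing module parameters, e.g. mode=151 for the RFC 4106 GCM self-test and mode=200 for the AES speed runs that now include ctr(aes).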
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index fa8c8f78c8d4..b6b93d416351 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -6,6 +6,13 @@ | |||
6 | * Copyright (c) 2007 Nokia Siemens Networks | 6 | * Copyright (c) 2007 Nokia Siemens Networks |
7 | * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * Updated RFC4106 AES-GCM testing. | ||
10 | * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) | ||
11 | * Adrian Hoban <adrian.hoban@intel.com> | ||
12 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | ||
13 | * Tadeusz Struk (tadeusz.struk@intel.com) | ||
14 | * Copyright (c) 2010, Intel Corporation. | ||
15 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | 16 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 17 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 18 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -2070,6 +2077,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2070 | }, { | 2077 | }, { |
2071 | .alg = "ghash", | 2078 | .alg = "ghash", |
2072 | .test = alg_test_hash, | 2079 | .test = alg_test_hash, |
2080 | .fips_allowed = 1, | ||
2073 | .suite = { | 2081 | .suite = { |
2074 | .hash = { | 2082 | .hash = { |
2075 | .vecs = ghash_tv_template, | 2083 | .vecs = ghash_tv_template, |
@@ -2211,6 +2219,22 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2211 | } | 2219 | } |
2212 | } | 2220 | } |
2213 | }, { | 2221 | }, { |
2222 | .alg = "ofb(aes)", | ||
2223 | .test = alg_test_skcipher, | ||
2224 | .fips_allowed = 1, | ||
2225 | .suite = { | ||
2226 | .cipher = { | ||
2227 | .enc = { | ||
2228 | .vecs = aes_ofb_enc_tv_template, | ||
2229 | .count = AES_OFB_ENC_TEST_VECTORS | ||
2230 | }, | ||
2231 | .dec = { | ||
2232 | .vecs = aes_ofb_dec_tv_template, | ||
2233 | .count = AES_OFB_DEC_TEST_VECTORS | ||
2234 | } | ||
2235 | } | ||
2236 | } | ||
2237 | }, { | ||
2214 | .alg = "pcbc(fcrypt)", | 2238 | .alg = "pcbc(fcrypt)", |
2215 | .test = alg_test_skcipher, | 2239 | .test = alg_test_skcipher, |
2216 | .suite = { | 2240 | .suite = { |
@@ -2242,6 +2266,23 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2242 | } | 2266 | } |
2243 | } | 2267 | } |
2244 | }, { | 2268 | }, { |
2269 | .alg = "rfc4106(gcm(aes))", | ||
2270 | .test = alg_test_aead, | ||
2271 | .suite = { | ||
2272 | .aead = { | ||
2273 | .enc = { | ||
2274 | .vecs = aes_gcm_rfc4106_enc_tv_template, | ||
2275 | .count = AES_GCM_4106_ENC_TEST_VECTORS | ||
2276 | }, | ||
2277 | .dec = { | ||
2278 | .vecs = aes_gcm_rfc4106_dec_tv_template, | ||
2279 | .count = AES_GCM_4106_DEC_TEST_VECTORS | ||
2280 | } | ||
2281 | } | ||
2282 | } | ||
2283 | }, { | ||
2284 | |||
2285 | |||
2245 | .alg = "rfc4309(ccm(aes))", | 2286 | .alg = "rfc4309(ccm(aes))", |
2246 | .test = alg_test_aead, | 2287 | .test = alg_test_aead, |
2247 | .fips_allowed = 1, | 2288 | .fips_allowed = 1, |
@@ -2429,6 +2470,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2429 | }, { | 2470 | }, { |
2430 | .alg = "xts(aes)", | 2471 | .alg = "xts(aes)", |
2431 | .test = alg_test_skcipher, | 2472 | .test = alg_test_skcipher, |
2473 | .fips_allowed = 1, | ||
2432 | .suite = { | 2474 | .suite = { |
2433 | .cipher = { | 2475 | .cipher = { |
2434 | .enc = { | 2476 | .enc = { |
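One detail worth noting about the new "ofb(aes)" and "rfc4106(gcm(aes))" entries: the test manager is understood to look descriptors up by a binary search on .alg, so the table has to stay alphabetically sorted — which is why the entries land before "pcbc(fcrypt)" and "rfc4309(ccm(aes))" respectively. A reconstruction of that lookup follows (not quoted from testmgr.c; struct alg_test_desc is testmgr-internal):

#include <linux/string.h>

static int example_find_test(const char *alg,
			     const struct alg_test_desc *descs, int n)
{
	int start = 0, end = n;

	while (start < end) {
		int i = (start + end) / 2;
		int diff = strcmp(descs[i].alg, alg);

		if (diff > 0)
			end = i;	/* search the lower half */
		else if (diff < 0)
			start = i + 1;	/* search the upper half */
		else
			return i;	/* exact match */
	}
	return -1;			/* not covered by the self-tests */
}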
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 74e35377fd30..27e60619538e 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -6,6 +6,15 @@ | |||
6 | * Copyright (c) 2007 Nokia Siemens Networks | 6 | * Copyright (c) 2007 Nokia Siemens Networks |
7 | * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * Updated RFC4106 AES-GCM testing. Some test vectors were taken from | ||
10 | * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/ | ||
11 | * gcm/gcm-test-vectors.tar.gz | ||
12 | * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) | ||
13 | * Adrian Hoban <adrian.hoban@intel.com> | ||
14 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | ||
15 | * Tadeusz Struk (tadeusz.struk@intel.com) | ||
16 | * Copyright (c) 2010, Intel Corporation. | ||
17 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | 18 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 19 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 20 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -442,8 +451,9 @@ static struct hash_testvec rmd320_tv_template[] = { | |||
442 | 451 | ||
443 | /* | 452 | /* |
444 | * SHA1 test vectors from from FIPS PUB 180-1 | 453 | * SHA1 test vectors from from FIPS PUB 180-1 |
454 | * Long vector from CAVS 5.0 | ||
445 | */ | 455 | */ |
446 | #define SHA1_TEST_VECTORS 2 | 456 | #define SHA1_TEST_VECTORS 3 |
447 | 457 | ||
448 | static struct hash_testvec sha1_tv_template[] = { | 458 | static struct hash_testvec sha1_tv_template[] = { |
449 | { | 459 | { |
@@ -458,6 +468,33 @@ static struct hash_testvec sha1_tv_template[] = { | |||
458 | "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1", | 468 | "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1", |
459 | .np = 2, | 469 | .np = 2, |
460 | .tap = { 28, 28 } | 470 | .tap = { 28, 28 } |
471 | }, { | ||
472 | .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06" | ||
473 | "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44" | ||
474 | "\x50\xa1\x05\xc3\xf9\x73\x5f\x7f" | ||
475 | "\xa9\xfe\x38\xcf\x67\xf3\x04\xa5" | ||
476 | "\x73\x6a\x10\x6e\x92\xe1\x71\x39" | ||
477 | "\xa6\x81\x3b\x1c\x81\xa4\xf3\xd3" | ||
478 | "\xfb\x95\x46\xab\x42\x96\xfa\x9f" | ||
479 | "\x72\x28\x26\xc0\x66\x86\x9e\xda" | ||
480 | "\xcd\x73\xb2\x54\x80\x35\x18\x58" | ||
481 | "\x13\xe2\x26\x34\xa9\xda\x44\x00" | ||
482 | "\x0d\x95\xa2\x81\xff\x9f\x26\x4e" | ||
483 | "\xcc\xe0\xa9\x31\x22\x21\x62\xd0" | ||
484 | "\x21\xcc\xa2\x8d\xb5\xf3\xc2\xaa" | ||
485 | "\x24\x94\x5a\xb1\xe3\x1c\xb4\x13" | ||
486 | "\xae\x29\x81\x0f\xd7\x94\xca\xd5" | ||
487 | "\xdf\xaf\x29\xec\x43\xcb\x38\xd1" | ||
488 | "\x98\xfe\x4a\xe1\xda\x23\x59\x78" | ||
489 | "\x02\x21\x40\x5b\xd6\x71\x2a\x53" | ||
490 | "\x05\xda\x4b\x1b\x73\x7f\xce\x7c" | ||
491 | "\xd2\x1c\x0e\xb7\x72\x8d\x08\x23" | ||
492 | "\x5a\x90\x11", | ||
493 | .psize = 163, | ||
494 | .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20" | ||
495 | "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17", | ||
496 | .np = 4, | ||
497 | .tap = { 63, 64, 31, 5 } | ||
461 | } | 498 | } |
462 | }; | 499 | }; |
463 | 500 | ||
@@ -2943,10 +2980,14 @@ static struct cipher_testvec cast6_dec_tv_template[] = { | |||
2943 | #define AES_XTS_DEC_TEST_VECTORS 4 | 2980 | #define AES_XTS_DEC_TEST_VECTORS 4 |
2944 | #define AES_CTR_ENC_TEST_VECTORS 3 | 2981 | #define AES_CTR_ENC_TEST_VECTORS 3 |
2945 | #define AES_CTR_DEC_TEST_VECTORS 3 | 2982 | #define AES_CTR_DEC_TEST_VECTORS 3 |
2983 | #define AES_OFB_ENC_TEST_VECTORS 1 | ||
2984 | #define AES_OFB_DEC_TEST_VECTORS 1 | ||
2946 | #define AES_CTR_3686_ENC_TEST_VECTORS 7 | 2985 | #define AES_CTR_3686_ENC_TEST_VECTORS 7 |
2947 | #define AES_CTR_3686_DEC_TEST_VECTORS 6 | 2986 | #define AES_CTR_3686_DEC_TEST_VECTORS 6 |
2948 | #define AES_GCM_ENC_TEST_VECTORS 9 | 2987 | #define AES_GCM_ENC_TEST_VECTORS 9 |
2949 | #define AES_GCM_DEC_TEST_VECTORS 8 | 2988 | #define AES_GCM_DEC_TEST_VECTORS 8 |
2989 | #define AES_GCM_4106_ENC_TEST_VECTORS 7 | ||
2990 | #define AES_GCM_4106_DEC_TEST_VECTORS 7 | ||
2950 | #define AES_CCM_ENC_TEST_VECTORS 7 | 2991 | #define AES_CCM_ENC_TEST_VECTORS 7 |
2951 | #define AES_CCM_DEC_TEST_VECTORS 7 | 2992 | #define AES_CCM_DEC_TEST_VECTORS 7 |
2952 | #define AES_CCM_4309_ENC_TEST_VECTORS 7 | 2993 | #define AES_CCM_4309_ENC_TEST_VECTORS 7 |
@@ -5467,6 +5508,64 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = { | |||
5467 | }, | 5508 | }, |
5468 | }; | 5509 | }; |
5469 | 5510 | ||
5511 | static struct cipher_testvec aes_ofb_enc_tv_template[] = { | ||
5512 | /* From NIST Special Publication 800-38A, Appendix F.5 */ | ||
5513 | { | ||
5514 | .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" | ||
5515 | "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", | ||
5516 | .klen = 16, | ||
5517 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07\x08" | ||
5518 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
5519 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
5520 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
5521 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
5522 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
5523 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
5524 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
5525 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
5526 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
5527 | .ilen = 64, | ||
5528 | .result = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" | ||
5529 | "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" | ||
5530 | "\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5" | ||
5531 | "\x3c\x52\xda\xc5\x4e\xd8\x25" | ||
5532 | "\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43" | ||
5533 | "\x44\xf7\xa8\x22\x60\xed\xcc" | ||
5534 | "\x30\x4c\x65\x28\xf6\x59\xc7\x78" | ||
5535 | "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e", | ||
5536 | .rlen = 64, | ||
5537 | } | ||
5538 | }; | ||
5539 | |||
5540 | static struct cipher_testvec aes_ofb_dec_tv_template[] = { | ||
5541 | /* From NIST Special Publication 800-38A, Appendix F.5 */ | ||
5542 | { | ||
5543 | .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" | ||
5544 | "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", | ||
5545 | .klen = 16, | ||
5546 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07\x08" | ||
5547 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
5548 | .input = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" | ||
5549 | "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" | ||
5550 | "\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5" | ||
5551 | "\x3c\x52\xda\xc5\x4e\xd8\x25" | ||
5552 | "\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43" | ||
5553 | "\x44\xf7\xa8\x22\x60\xed\xcc" | ||
5554 | "\x30\x4c\x65\x28\xf6\x59\xc7\x78" | ||
5555 | "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e", | ||
5556 | .ilen = 64, | ||
5557 | .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
5558 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
5559 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
5560 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
5561 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
5562 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
5563 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
5564 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
5565 | .rlen = 64, | ||
5566 | } | ||
5567 | }; | ||
5568 | |||
5470 | static struct aead_testvec aes_gcm_enc_tv_template[] = { | 5569 | static struct aead_testvec aes_gcm_enc_tv_template[] = { |
5471 | { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ | 5570 | { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ |
5472 | .key = zeroed_string, | 5571 | .key = zeroed_string, |
@@ -5829,6 +5928,356 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { | |||
5829 | } | 5928 | } |
5830 | }; | 5929 | }; |
5831 | 5930 | ||
5931 | static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | ||
5932 | { /* Generated using Crypto++ */ | ||
5933 | .key = zeroed_string, | ||
5934 | .klen = 20, | ||
5935 | .iv = zeroed_string, | ||
5936 | .input = zeroed_string, | ||
5937 | .ilen = 16, | ||
5938 | .assoc = zeroed_string, | ||
5939 | .alen = 8, | ||
5940 | .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" | ||
5941 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" | ||
5942 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" | ||
5943 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", | ||
5944 | .rlen = 32, | ||
5945 | },{ | ||
5946 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
5947 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
5948 | "\x00\x00\x00\x00", | ||
5949 | .klen = 20, | ||
5950 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
5951 | "\x00\x00\x00\x00", | ||
5952 | .input = zeroed_string, | ||
5953 | .ilen = 16, | ||
5954 | .assoc = zeroed_string, | ||
5955 | .alen = 8, | ||
5956 | .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" | ||
5957 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" | ||
5958 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" | ||
5959 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", | ||
5960 | .rlen = 32, | ||
5961 | |||
5962 | }, { | ||
5963 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
5964 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
5965 | "\x00\x00\x00\x00", | ||
5966 | .klen = 20, | ||
5967 | .iv = zeroed_string, | ||
5968 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
5969 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
5970 | .ilen = 16, | ||
5971 | .assoc = zeroed_string, | ||
5972 | .alen = 8, | ||
5973 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | ||
5974 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | ||
5975 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" | ||
5976 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", | ||
5977 | .rlen = 32, | ||
5978 | }, { | ||
5979 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
5980 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
5981 | "\x00\x00\x00\x00", | ||
5982 | .klen = 20, | ||
5983 | .iv = zeroed_string, | ||
5984 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
5985 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
5986 | .ilen = 16, | ||
5987 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
5988 | .alen = 8, | ||
5989 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | ||
5990 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | ||
5991 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" | ||
5992 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", | ||
5993 | .rlen = 32, | ||
5994 | }, { | ||
5995 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
5996 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
5997 | "\x00\x00\x00\x00", | ||
5998 | .klen = 20, | ||
5999 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
6000 | "\x00\x00\x00\x00", | ||
6001 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6002 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6003 | .ilen = 16, | ||
6004 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6005 | .alen = 8, | ||
6006 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | ||
6007 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | ||
6008 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" | ||
6009 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", | ||
6010 | .rlen = 32, | ||
6011 | }, { | ||
6012 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6013 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6014 | "\x00\x00\x00\x00", | ||
6015 | .klen = 20, | ||
6016 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
6017 | "\x00\x00\x00\x00", | ||
6018 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6019 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6020 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6021 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6022 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6023 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6024 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6025 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6026 | .ilen = 64, | ||
6027 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6028 | .alen = 8, | ||
6029 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | ||
6030 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | ||
6031 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" | ||
6032 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" | ||
6033 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" | ||
6034 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" | ||
6035 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" | ||
6036 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" | ||
6037 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" | ||
6038 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", | ||
6039 | .rlen = 80, | ||
6040 | }, { | ||
6041 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
6042 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
6043 | "\x00\x00\x00\x00", | ||
6044 | .klen = 20, | ||
6045 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" | ||
6046 | "\x00\x00\x00\x00", | ||
6047 | .input = "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6048 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6049 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6050 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6051 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6052 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6053 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6054 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6055 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6056 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6057 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6058 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6059 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6060 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6061 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6062 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6063 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6064 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6065 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6066 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6067 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6068 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6069 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6070 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
6071 | .ilen = 192, | ||
6072 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | ||
6073 | "\xaa\xaa\xaa\xaa", | ||
6074 | .alen = 12, | ||
6075 | .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" | ||
6076 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" | ||
6077 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" | ||
6078 | "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" | ||
6079 | "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" | ||
6080 | "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" | ||
6081 | "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" | ||
6082 | "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" | ||
6083 | "\xCF\x07\x57\x41\x67\xD0\xC4\x42" | ||
6084 | "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" | ||
6085 | "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" | ||
6086 | "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" | ||
6087 | "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" | ||
6088 | "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" | ||
6089 | "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" | ||
6090 | "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" | ||
6091 | "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" | ||
6092 | "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" | ||
6093 | "\x7E\x13\x06\x82\x08\x17\xA4\x35" | ||
6094 | "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" | ||
6095 | "\xA3\x05\x38\x95\x20\x1A\x47\x04" | ||
6096 | "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" | ||
6097 | "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" | ||
6098 | "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" | ||
6099 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" | ||
6100 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", | ||
6101 | .rlen = 208, | ||
6102 | } | ||
6103 | }; | ||
6104 | |||
6105 | static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | ||
6106 | { /* Generated using Crypto++ */ | ||
6107 | .key = zeroed_string, | ||
6108 | .klen = 20, | ||
6109 | .iv = zeroed_string, | ||
6110 | .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" | ||
6111 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" | ||
6112 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" | ||
6113 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", | ||
6114 | .ilen = 32, | ||
6115 | .assoc = zeroed_string, | ||
6116 | .alen = 8, | ||
6117 | .result = zeroed_string, | ||
6118 | .rlen = 16, | ||
6119 | |||
6120 | },{ | ||
6121 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6122 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6123 | "\x00\x00\x00\x00", | ||
6124 | .klen = 20, | ||
6125 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
6126 | "\x00\x00\x00\x00", | ||
6127 | .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" | ||
6128 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" | ||
6129 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" | ||
6130 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", | ||
6131 | .ilen = 32, | ||
6132 | .assoc = zeroed_string, | ||
6133 | .alen = 8, | ||
6134 | .result = zeroed_string, | ||
6135 | .rlen = 16, | ||
6136 | }, { | ||
6137 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6138 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6139 | "\x00\x00\x00\x00", | ||
6140 | .klen = 20, | ||
6141 | .iv = zeroed_string, | ||
6142 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | ||
6143 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | ||
6144 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" | ||
6145 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", | ||
6146 | .ilen = 32, | ||
6147 | .assoc = zeroed_string, | ||
6148 | .alen = 8, | ||
6149 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6150 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6151 | .rlen = 16, | ||
6152 | }, { | ||
6153 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6154 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6155 | "\x00\x00\x00\x00", | ||
6156 | .klen = 20, | ||
6157 | .iv = zeroed_string, | ||
6158 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | ||
6159 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | ||
6160 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" | ||
6161 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", | ||
6162 | .ilen = 32, | ||
6163 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6164 | .alen = 8, | ||
6165 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6166 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6167 | .rlen = 16, | ||
6168 | |||
6169 | }, { | ||
6170 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6171 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6172 | "\x00\x00\x00\x00", | ||
6173 | .klen = 20, | ||
6174 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
6175 | "\x00\x00\x00\x00", | ||
6176 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | ||
6177 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | ||
6178 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" | ||
6179 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", | ||
6180 | .ilen = 32, | ||
6181 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6182 | .alen = 8, | ||
6183 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6184 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6185 | .rlen = 16, | ||
6186 | }, { | ||
6187 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
6188 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
6189 | "\x00\x00\x00\x00", | ||
6190 | .klen = 20, | ||
6191 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
6192 | "\x00\x00\x00\x00", | ||
6193 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | ||
6194 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | ||
6195 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" | ||
6196 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" | ||
6197 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" | ||
6198 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" | ||
6199 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" | ||
6200 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" | ||
6201 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" | ||
6202 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", | ||
6203 | .ilen = 80, | ||
6204 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6205 | .alen = 8, | ||
6206 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6207 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6208 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6209 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6210 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6211 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6212 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
6213 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
6214 | .rlen = 64, | ||
6215 | }, { | ||
6216 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
6217 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
6218 | "\x00\x00\x00\x00", | ||
6219 | .klen = 20, | ||
6220 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" | ||
6221 | "\x00\x00\x00\x00", | ||
6222 | .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" | ||
6223 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" | ||
6224 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" | ||
6225 | "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" | ||
6226 | "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" | ||
6227 | "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" | ||
6228 | "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" | ||
6229 | "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" | ||
6230 | "\xCF\x07\x57\x41\x67\xD0\xC4\x42" | ||
6231 | "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" | ||
6232 | "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" | ||
6233 | "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" | ||
6234 | "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" | ||
6235 | "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" | ||
6236 | "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" | ||
6237 | "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" | ||
6238 | "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" | ||
6239 | "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" | ||
6240 | "\x7E\x13\x06\x82\x08\x17\xA4\x35" | ||
6241 | "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" | ||
6242 | "\xA3\x05\x38\x95\x20\x1A\x47\x04" | ||
6243 | "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" | ||
6244 | "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" | ||
6245 | "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" | ||
6246 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" | ||
6247 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", | ||
6248 | .ilen = 208, | ||
6249 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | ||
6250 | "\xaa\xaa\xaa\xaa", | ||
6251 | .alen = 12, | ||
6252 | .result = "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6253 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6254 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6255 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6256 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6257 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6258 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6259 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6260 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6261 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6262 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6263 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6264 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6265 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6266 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6267 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6268 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6269 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6270 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6271 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6272 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6273 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6274 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
6275 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
6276 | .rlen = 192, | ||
6277 | |||
6278 | } | ||
6279 | }; | ||
6280 | |||
5832 | static struct aead_testvec aes_ccm_enc_tv_template[] = { | 6281 | static struct aead_testvec aes_ccm_enc_tv_template[] = { |
5833 | { /* From RFC 3610 */ | 6282 | { /* From RFC 3610 */ |
5834 | .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" | 6283 | .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" |
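A short aside on the OFB vectors above (not part of the patch): OFB turns AES into a keystream generator, O_1 = E_K(IV) and O_i = E_K(O_{i-1}), and both directions are the same XOR against that keystream — which is why the decryption template is simply the encryption template with .input and .result swapped. After the block cipher, the whole data path reduces to:

#include <linux/types.h>

static void example_ofb_xor(u8 *dst, const u8 *src, const u8 *keystream,
			    unsigned int len)
{
	while (len--)
		*dst++ = *src++ ^ *keystream++;	/* identical for enc and dec */
}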
diff --git a/crypto/vmac.c b/crypto/vmac.c index 0999274a27ac..f35ff8a3926e 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c | |||
@@ -95,7 +95,7 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
95 | 95 | ||
96 | /* | 96 | /* |
97 | * For highest performance the L1 NH and L2 polynomial hashes should be | 97 | * For highest performance the L1 NH and L2 polynomial hashes should be |
98 | * carefully implemented to take advantage of one's target architechture. | 98 | * carefully implemented to take advantage of one's target architecture. |
99 | * Here these two hash functions are defined multiple time; once for | 99 | * Here these two hash functions are defined multiple time; once for |
100 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once | 100 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once |
101 | * for the rest (32-bit) architectures. | 101 | * for the rest (32-bit) architectures. |
diff --git a/crypto/xts.c b/crypto/xts.c index 555ecaab1e54..851705446c82 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -45,7 +45,7 @@ static int setkey(struct crypto_tfm *parent, const u8 *key, | |||
45 | return -EINVAL; | 45 | return -EINVAL; |
46 | } | 46 | } |
47 | 47 | ||
48 | /* we need two cipher instances: one to compute the inital 'tweak' | 48 | /* we need two cipher instances: one to compute the initial 'tweak' |
49 | * by encrypting the IV (usually the 'plain' iv) and the other | 49 | * by encrypting the IV (usually the 'plain' iv) and the other |
50 | * one to encrypt and decrypt the data */ | 50 | * one to encrypt and decrypt the data */ |
51 | 51 | ||
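The comment corrected above summarises the XTS construction: one cipher instance, keyed with the second half of the XTS key, encrypts the IV once to produce the initial tweak, while the other instance encrypts the data blocks. A minimal per-block sketch of that flow is below; block_encrypt() and the key1/key2 handles are hypothetical placeholders, not the crypto API that xts.c actually uses:

        /* Double a 16-byte tweak in GF(2^128), little-endian convention as in XTS. */
        static void xts_double_tweak(unsigned char t[16])
        {
                unsigned char carry = 0;
                int i;

                for (i = 0; i < 16; i++) {
                        unsigned char c = t[i] >> 7;

                        t[i] = (t[i] << 1) | carry;
                        carry = c;
                }
                if (carry)
                        t[0] ^= 0x87;   /* reduction by x^128 + x^7 + x^2 + x + 1 */
        }

        /* Encrypt nblocks 16-byte blocks in place; key1/key2 are the two sub-keys. */
        static void xts_encrypt_sketch(const void *key1, const void *key2,
                                       const unsigned char iv[16],
                                       unsigned char *data, int nblocks)
        {
                unsigned char t[16];
                int i, j;

                block_encrypt(key2, iv, t);             /* tweak cipher: T0 = E_K2(IV) */
                for (j = 0; j < nblocks; j++) {
                        unsigned char *blk = data + 16 * j;

                        for (i = 0; i < 16; i++)        /* pre-whiten with tweak */
                                blk[i] ^= t[i];
                        block_encrypt(key1, blk, blk);  /* data cipher */
                        for (i = 0; i < 16; i++)        /* post-whiten with tweak */
                                blk[i] ^= t[i];
                        xts_double_tweak(t);            /* T_{j+1} = T_j * alpha */
                }
        }

This is why setkey() splits the supplied key in two: the tweak computation and the data encryption must use independent cipher instances.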
diff --git a/crypto/zlib.c b/crypto/zlib.c index c3015733c990..06b62e5cdcc7 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
31 | #include <linux/net.h> | 31 | #include <linux/net.h> |
32 | #include <linux/slab.h> | ||
33 | 32 | ||
34 | #include <crypto/internal/compress.h> | 33 | #include <crypto/internal/compress.h> |
35 | 34 | ||
@@ -60,7 +59,7 @@ static void zlib_decomp_exit(struct zlib_ctx *ctx) | |||
60 | 59 | ||
61 | if (stream->workspace) { | 60 | if (stream->workspace) { |
62 | zlib_inflateEnd(stream); | 61 | zlib_inflateEnd(stream); |
63 | kfree(stream->workspace); | 62 | vfree(stream->workspace); |
64 | stream->workspace = NULL; | 63 | stream->workspace = NULL; |
65 | } | 64 | } |
66 | } | 65 | } |
@@ -85,6 +84,7 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, | |||
85 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | 84 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); |
86 | struct z_stream_s *stream = &ctx->comp_stream; | 85 | struct z_stream_s *stream = &ctx->comp_stream; |
87 | struct nlattr *tb[ZLIB_COMP_MAX + 1]; | 86 | struct nlattr *tb[ZLIB_COMP_MAX + 1]; |
87 | int window_bits, mem_level; | ||
88 | size_t workspacesize; | 88 | size_t workspacesize; |
89 | int ret; | 89 | int ret; |
90 | 90 | ||
@@ -94,12 +94,18 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, | |||
94 | 94 | ||
95 | zlib_comp_exit(ctx); | 95 | zlib_comp_exit(ctx); |
96 | 96 | ||
97 | workspacesize = zlib_deflate_workspacesize(); | 97 | window_bits = tb[ZLIB_COMP_WINDOWBITS] |
98 | stream->workspace = vmalloc(workspacesize); | 98 | ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) |
99 | : MAX_WBITS; | ||
100 | mem_level = tb[ZLIB_COMP_MEMLEVEL] | ||
101 | ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) | ||
102 | : DEF_MEM_LEVEL; | ||
103 | |||
104 | workspacesize = zlib_deflate_workspacesize(window_bits, mem_level); | ||
105 | stream->workspace = vzalloc(workspacesize); | ||
99 | if (!stream->workspace) | 106 | if (!stream->workspace) |
100 | return -ENOMEM; | 107 | return -ENOMEM; |
101 | 108 | ||
102 | memset(stream->workspace, 0, workspacesize); | ||
103 | ret = zlib_deflateInit2(stream, | 109 | ret = zlib_deflateInit2(stream, |
104 | tb[ZLIB_COMP_LEVEL] | 110 | tb[ZLIB_COMP_LEVEL] |
105 | ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) | 111 | ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) |
@@ -107,12 +113,8 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, | |||
107 | tb[ZLIB_COMP_METHOD] | 113 | tb[ZLIB_COMP_METHOD] |
108 | ? nla_get_u32(tb[ZLIB_COMP_METHOD]) | 114 | ? nla_get_u32(tb[ZLIB_COMP_METHOD]) |
109 | : Z_DEFLATED, | 115 | : Z_DEFLATED, |
110 | tb[ZLIB_COMP_WINDOWBITS] | 116 | window_bits, |
111 | ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) | 117 | mem_level, |
112 | : MAX_WBITS, | ||
113 | tb[ZLIB_COMP_MEMLEVEL] | ||
114 | ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) | ||
115 | : DEF_MEM_LEVEL, | ||
116 | tb[ZLIB_COMP_STRATEGY] | 118 | tb[ZLIB_COMP_STRATEGY] |
117 | ? nla_get_u32(tb[ZLIB_COMP_STRATEGY]) | 119 | ? nla_get_u32(tb[ZLIB_COMP_STRATEGY]) |
118 | : Z_DEFAULT_STRATEGY); | 120 | : Z_DEFAULT_STRATEGY); |
@@ -225,13 +227,13 @@ static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params, | |||
225 | ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS]) | 227 | ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS]) |
226 | : DEF_WBITS; | 228 | : DEF_WBITS; |
227 | 229 | ||
228 | stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); | 230 | stream->workspace = vzalloc(zlib_inflate_workspacesize()); |
229 | if (!stream->workspace) | 231 | if (!stream->workspace) |
230 | return -ENOMEM; | 232 | return -ENOMEM; |
231 | 233 | ||
232 | ret = zlib_inflateInit2(stream, ctx->decomp_windowBits); | 234 | ret = zlib_inflateInit2(stream, ctx->decomp_windowBits); |
233 | if (ret != Z_OK) { | 235 | if (ret != Z_OK) { |
234 | kfree(stream->workspace); | 236 | vfree(stream->workspace); |
235 | stream->workspace = NULL; | 237 | stream->workspace = NULL; |
236 | return -EINVAL; | 238 | return -EINVAL; |
237 | } | 239 | } |
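On the decompression side the workspace moves from kzalloc() to vzalloc(), so both the error path here and zlib_decomp_exit() earlier in the patch must release it with vfree(); passing a vmalloc-backed pointer to kfree() would be invalid. The allocate/initialise/unwind pattern, reduced to its core (a sketch of the patched flow, not the full function):

        stream->workspace = vzalloc(zlib_inflate_workspacesize());
        if (!stream->workspace)
                return -ENOMEM;

        ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
        if (ret != Z_OK) {
                vfree(stream->workspace);       /* vmalloc-backed: pair with vfree(), not kfree() */
                stream->workspace = NULL;
                return -EINVAL;
        }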