author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 16:31:29 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 16:31:29 -0500
commit     0f1d6dfe03ca4e36132221b918499c6f0b0f048d (patch)
tree       0de8e9330610190a23e173ca7d7f3fb74a517aa2 /crypto
parent     d05c5f7ba164aed3db02fb188c26d0dd94f5455b (diff)
parent     04b46fbdea5e31ffd745a34fa61269a69ba9f47a (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.10:
API:
- add skcipher walk interface
- add asynchronous compression (acomp) interface
- fix algif_aead AIO handling of zero buffer
Algorithms:
- fix unaligned access in poly1305
- fix DRBG output to large buffers
Drivers:
- add support for iMX6UL to caam
- fix givenc descriptors (used by IPsec) in caam
- accelerated SHA256/SHA512 for ARM64 from OpenSSL
- add SSE CRCT10DIF and CRC32 to ARM/ARM64
- add AEAD support to Chelsio chcr
- add Armada 8K support to omap-rng"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
crypto: testmgr - fix overlap in chunked tests again
crypto: arm/crc32 - accelerated support based on x86 SSE implementation
crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
crypto: arm/crct10dif - port x86 SSE implementation to ARM
crypto: arm64/crct10dif - port x86 SSE implementation to arm64
crypto: testmgr - add/enhance test cases for CRC-T10DIF
crypto: testmgr - avoid overlap in chunked tests
crypto: chcr - checking for IS_ERR() instead of NULL
crypto: caam - check caam_emi_slow instead of re-lookup platform
crypto: algif_aead - fix AIO handling of zero buffer
crypto: aes-ce - Make aes_simd_algs static
crypto: algif_skcipher - set error code when kcalloc fails
crypto: caam - make aamalg_desc a proper module
crypto: caam - pass key buffers with typesafe pointers
crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
MAINTAINERS: add crypto headers to crypto entry
crypt: doc - remove misleading mention of async API
crypto: doc - fix header file name
crypto: api - fix comment typo
crypto: skcipher - Add separate walker for AEAD decryption
..
Diffstat (limited to 'crypto')
40 files changed, 3300 insertions, 963 deletions
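
The centrepiece of this pull is the new acomp/scomp compression interface (crypto/acompress.c below, plus the Kconfig and Makefile hunks that wire it up). As a rough sketch only -- not part of the patch, with asynchronous completion handling omitted -- a kernel caller would drive the new API roughly like this; "842" and the helper name are just examples:

#include <linux/scatterlist.h>
#include <crypto/acompress.h>

/* Illustrative sketch: compress one linear kernel buffer into another. */
static int example_acomp_compress(void *src, unsigned int slen,
                                  void *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct scatterlist sg_src, sg_dst;
        int ret;

        /* "842" is only an example name; any acomp/scomp provider works. */
        tfm = crypto_alloc_acomp("842", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                crypto_free_acomp(tfm);
                return -ENOMEM;
        }

        /* Buffers must be linear kernel memory, not on the stack. */
        sg_init_one(&sg_src, src, slen);
        sg_init_one(&sg_dst, dst, dlen);
        acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);

        /* A real caller sets a completion callback and handles -EINPROGRESS. */
        ret = crypto_acomp_compress(req);

        acomp_request_free(req);
        crypto_free_acomp(tfm);
        return ret;
}

Decompression is symmetric via crypto_acomp_decompress() on the same request.
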
diff --git a/crypto/842.c b/crypto/842.c
index 98e387efb8c8..bc26dc942821 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/sw842.h>
+#include <crypto/internal/scompress.h>
 
 struct crypto842_ctx {
-	char wmem[SW842_MEM_COMPRESS];	/* working memory for compress */
+	void *wmem;	/* working memory for compress */
 };
 
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+	void *ctx;
+
+	ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->wmem = crypto842_alloc_ctx(NULL);
+	if (IS_ERR(ctx->wmem))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto842_free_ctx(NULL, ctx->wmem);
+}
+
 static int crypto842_compress(struct crypto_tfm *tfm,
 			      const u8 *src, unsigned int slen,
 			      u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
 	return sw842_compress(src, slen, dst, dlen, ctx->wmem);
 }
 
+static int crypto842_scompress(struct crypto_scomp *tfm,
+			       const u8 *src, unsigned int slen,
+			       u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
 static int crypto842_decompress(struct crypto_tfm *tfm,
 				const u8 *src, unsigned int slen,
 				u8 *dst, unsigned int *dlen)
@@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
 	return sw842_decompress(src, slen, dst, dlen);
 }
 
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+				 const u8 *src, unsigned int slen,
+				 u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return sw842_decompress(src, slen, dst, dlen);
+}
+
 static struct crypto_alg alg = {
 	.cra_name		= "842",
 	.cra_driver_name	= "842-generic",
@@ -59,20 +108,48 @@ static struct crypto_alg alg = {
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize		= sizeof(struct crypto842_ctx),
 	.cra_module		= THIS_MODULE,
+	.cra_init		= crypto842_init,
+	.cra_exit		= crypto842_exit,
 	.cra_u			= { .compress = {
 	.coa_compress		= crypto842_compress,
 	.coa_decompress		= crypto842_decompress } }
 };
 
+static struct scomp_alg scomp = {
+	.alloc_ctx		= crypto842_alloc_ctx,
+	.free_ctx		= crypto842_free_ctx,
+	.compress		= crypto842_scompress,
+	.decompress		= crypto842_sdecompress,
+	.base			= {
+		.cra_name	= "842",
+		.cra_driver_name = "842-scomp",
+		.cra_priority	= 100,
+		.cra_module	= THIS_MODULE,
+	}
+};
+
 static int __init crypto842_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int ret;
+
+	ret = crypto_register_alg(&alg);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg);
+		return ret;
+	}
+
+	return ret;
 }
 module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_scomp(&scomp);
 }
 module_exit(crypto842_mod_exit);
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 84d71482bf08..160f08e721cc 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -24,7 +24,7 @@ comment "Crypto core or helper"
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
 	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
-	depends on MODULE_SIG
+	depends on (MODULE_SIG || !MODULES)
 	help
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
@@ -102,6 +102,15 @@ config CRYPTO_KPP
 	select CRYPTO_ALGAPI
 	select CRYPTO_KPP2
 
+config CRYPTO_ACOMP2
+	tristate
+	select CRYPTO_ALGAPI2
+
+config CRYPTO_ACOMP
+	tristate
+	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
+
 config CRYPTO_RSA
 	tristate "RSA algorithm"
 	select CRYPTO_AKCIPHER
@@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
 	select CRYPTO_BLKCIPHER2
 	select CRYPTO_AKCIPHER2
 	select CRYPTO_KPP2
+	select CRYPTO_ACOMP2
 
 config CRYPTO_USER
 	tristate "Userspace cryptographic algorithm configuration"
@@ -236,10 +246,14 @@ config CRYPTO_ABLK_HELPER
 	tristate
 	select CRYPTO_CRYPTD
 
+config CRYPTO_SIMD
+	tristate
+	select CRYPTO_CRYPTD
+
 config CRYPTO_GLUE_HELPER_X86
 	tristate
 	depends on X86
-	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 
 config CRYPTO_ENGINE
 	tristate
@@ -437,7 +451,7 @@ config CRYPTO_CRC32C_INTEL
 	  gain performance compared with software implementation.
 	  Module will be crc32c-intel.
 
-config CRYPT_CRC32C_VPMSUM
+config CRYPTO_CRC32C_VPMSUM
 	tristate "CRC32c CRC algorithm (powerpc64)"
 	depends on PPC64 && ALTIVEC
 	select CRYPTO_HASH
@@ -928,14 +942,13 @@ config CRYPTO_AES_X86_64
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
 	depends on X86
+	select CRYPTO_AEAD
 	select CRYPTO_AES_X86_64 if 64BIT
 	select CRYPTO_AES_586 if !64BIT
-	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER
 	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 	select CRYPTO_GLUE_HELPER_X86 if 64BIT
-	select CRYPTO_LRW
-	select CRYPTO_XTS
+	select CRYPTO_SIMD
 	help
 	  Use Intel AES-NI instructions for AES algorithm.
 
@@ -1568,6 +1581,7 @@ comment "Compression"
 config CRYPTO_DEFLATE
 	tristate "Deflate compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
 	help
@@ -1579,6 +1593,7 @@ config CRYPTO_DEFLATE
 config CRYPTO_LZO
 	tristate "LZO compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	help
@@ -1587,6 +1602,7 @@ config CRYPTO_LZO
 config CRYPTO_842
 	tristate "842 compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select 842_COMPRESS
 	select 842_DECOMPRESS
 	help
@@ -1595,6 +1611,7 @@ config CRYPTO_842
 config CRYPTO_LZ4
 	tristate "LZ4 compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZ4_COMPRESS
 	select LZ4_DECOMPRESS
 	help
@@ -1603,6 +1620,7 @@ config CRYPTO_LZ4
 config CRYPTO_LZ4HC
 	tristate "LZ4HC compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZ4HC_COMPRESS
 	select LZ4_DECOMPRESS
 	help
diff --git a/crypto/Makefile b/crypto/Makefile
index bd6a029094e6..b8f0e3eb0791 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -51,6 +51,10 @@ rsa_generic-y += rsa_helper.o
 rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
+crypto_acompress-y := acompress.o
+crypto_acompress-y += scompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
+
 cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -139,3 +143,5 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx/
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
 obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
 obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+crypto_simd-y := simd.o
+obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000000000000..887783d8e9a9
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,169 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_acomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_acomp racomp;
+
+	strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+		    sizeof(struct crypto_report_acomp), &racomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+	struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+	alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+	struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+		return crypto_init_scomp_ops_async(tfm);
+
+	acomp->compress = alg->compress;
+	acomp->decompress = alg->decompress;
+	acomp->dst_free = alg->dst_free;
+	acomp->reqsize = alg->reqsize;
+
+	if (alg->exit)
+		acomp->base.exit = crypto_acomp_exit_tfm;
+
+	if (alg->init)
+		return alg->init(acomp);
+
+	return 0;
+}
+
+static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+	int extsize = crypto_alg_extsize(alg);
+
+	if (alg->cra_type != &crypto_acomp_type)
+		extsize += sizeof(struct crypto_scomp *);
+
+	return extsize;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+	.extsize = crypto_acomp_extsize,
+	.init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_acomp_show,
+#endif
+	.report = crypto_acomp_report,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
+	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
+	.tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+{
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+	struct acomp_req *req;
+
+	req = __acomp_request_alloc(acomp);
+	if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+		return crypto_acomp_scomp_alloc_ctx(req);
+
+	return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+		crypto_acomp_scomp_free_ctx(req);
+
+	if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
+		acomp->dst_free(req->dst);
+		req->dst = NULL;
+	}
+
+	__acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	base->cra_type = &crypto_acomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_acomp);
+
+int crypto_unregister_acomp(struct acomp_alg *alg)
+{
+	return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous compression type");
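
crypto/acompress.c above is only the type glue; a driver exposes an actual algorithm by filling in a struct acomp_alg and registering it with crypto_register_acomp(). A minimal, hypothetical skeleton follows -- the "mydrv" name, the callbacks, and the chosen cra_name are invented for illustration and are not part of the patch:

#include <linux/module.h>
#include <crypto/internal/acompress.h>

/* Hypothetical callbacks: a real driver would consume req->src/req->slen
 * and fill req->dst/req->dlen (completing asynchronously if it offloads). */
static int mydrv_compress(struct acomp_req *req)
{
        return -ENOSYS;
}

static int mydrv_decompress(struct acomp_req *req)
{
        return -ENOSYS;
}

static struct acomp_alg mydrv_acomp = {
        .compress   = mydrv_compress,
        .decompress = mydrv_decompress,
        .base       = {
                .cra_name        = "deflate",
                .cra_driver_name = "deflate-mydrv",
                .cra_priority    = 300,
                .cra_module      = THIS_MODULE,
        },
};

static int __init mydrv_mod_init(void)
{
        return crypto_register_acomp(&mydrv_acomp);
}
module_init(mydrv_mod_init);

static void __exit mydrv_mod_exit(void)
{
        crypto_unregister_acomp(&mydrv_acomp);
}
module_exit(mydrv_mod_exit);

MODULE_LICENSE("GPL");
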
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 6e39d9c05b98..ccb85e1798f2 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 	memcpy(param->alg, alg->cra_name, sizeof(param->alg));
 	type = alg->cra_flags;
 
-	/* This piece of crap needs to disappear into per-type test hooks. */
-	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
-	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-	     alg->cra_ablkcipher.ivsize))
+	/* Do not test internal algorithms. */
+	if (type & CRYPTO_ALG_INTERNAL)
 		type |= CRYPTO_ALG_TESTED;
 
 	param->type = type;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 235f54d4f8a9..668ef402c6eb 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -454,12 +454,13 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
-	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+	areq->tsgl = sock_kmalloc(sk,
+				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
 				  GFP_KERNEL);
 	if (unlikely(!areq->tsgl))
 		goto free;
 
-	sg_init_table(areq->tsgl, sgl->cur);
+	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
 	for (i = 0; i < sgl->cur; i++)
 		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
 			    sgl->sg[i].length, sgl->sg[i].offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 1e38aaa8303e..a9e79d8eff87 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -566,8 +566,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			 * need to expand */
 			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
 				      GFP_KERNEL);
-			if (!tmp)
+			if (!tmp) {
+				err = -ENOMEM;
 				goto free;
+			}
 
 			sg_init_table(tmp, tx_nents * 2);
 			for (x = 0; x < tx_nents; x++)
diff --git a/crypto/api.c b/crypto/api.c
index bbc147cb5dec..b16ce1653284 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 	if (!name)
 		return ERR_PTR(-ENOENT);
 
+	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
 	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
-	type &= mask;
 
 	alg = crypto_alg_lookup(name, type, mask);
 	if (!alg) {
@@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
 {
 	const struct crypto_type *type = tfm->__crt_alg->cra_type;
 
-	if (type) {
-		if (tfm->exit)
-			tfm->exit(tfm);
-		return;
-	}
-
-	switch (crypto_tfm_alg_type(tfm)) {
-	case CRYPTO_ALG_TYPE_CIPHER:
-		crypto_exit_cipher_ops(tfm);
-		break;
-
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		crypto_exit_compress_ops(tfm);
-		break;
-
-	default:
-		BUG();
-	}
+	if (type && tfm->exit)
+		tfm->exit(tfm);
 }
 
 static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a7e1ac786c5d..875470b0e026 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);
 
-	enc = crypto_spawn_skcipher2(&ictx->enc);
+	enc = crypto_spawn_skcipher(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;
@@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_auth;
 
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 121010ac9962..6f8f6b86bfe2 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);
 
-	enc = crypto_spawn_skcipher2(&ictx->enc);
+	enc = crypto_spawn_skcipher(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;
@@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_auth;
 
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 780ee27b2d43..68f751a41a84 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -1,7 +1,7 @@
 /*
  * CBC: Cipher Block Chaining mode
  *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -10,191 +10,78 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/log2.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/slab.h>
 
 struct crypto_cbc_ctx {
 	struct crypto_cipher *child;
 };
 
-static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
 			     unsigned int keylen)
 {
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_cipher *child = ctx->child;
 	int err;
 
 	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 				       CRYPTO_TFM_REQ_MASK);
 	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
+	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
 				     CRYPTO_TFM_RES_MASK);
 	return err;
 }
 
-static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
+static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
+					  const u8 *src, u8 *dst)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(iv, src, bsize);
-		fn(crypto_cipher_tfm(tfm), dst, iv);
-		memcpy(iv, dst, bsize);
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	return nbytes;
-}
-
-static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
-{
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(src, iv, bsize);
-		fn(crypto_cipher_tfm(tfm), src, src);
-		iv = src;
-
-		src += bsize;
-	} while ((nbytes -= bsize) >= bsize);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
+	crypto_cipher_encrypt_one(ctx->child, dst, src);
 }
 
-static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static int crypto_cbc_encrypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
-		else
-			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	return err;
+	return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
 }
 
-static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
+static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
+					  const u8 *src, u8 *dst)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		fn(crypto_cipher_tfm(tfm), dst, src);
-		crypto_xor(dst, iv, bsize);
-		iv = src;
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
-{
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 last_iv[bsize];
-
-	/* Start of the last block. */
-	src += nbytes - (nbytes & (bsize - 1)) - bsize;
-	memcpy(last_iv, src, bsize);
-
-	for (;;) {
-		fn(crypto_cipher_tfm(tfm), src, src);
-		if ((nbytes -= bsize) < bsize)
-			break;
-		crypto_xor(src, src - bsize, bsize);
-		src -= bsize;
-	}
-
-	crypto_xor(src, walk->iv, bsize);
-	memcpy(walk->iv, last_iv, bsize);
-
-	return nbytes;
+	crypto_cipher_decrypt_one(ctx->child, dst, src);
 }
 
-static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static int crypto_cbc_decrypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
-		else
-			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+	while (walk.nbytes) {
+		err = crypto_cbc_decrypt_blocks(&walk, tfm,
+						crypto_cbc_decrypt_one);
+		err = skcipher_walk_done(&walk, err);
 	}
 
 	return err;
 }
 
-static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_cipher *cipher;
 
 	cipher = crypto_spawn_cipher(spawn);
@@ -205,72 +92,94 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
 	crypto_free_cipher(ctx->child);
 }
 
-static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+static void crypto_cbc_free(struct skcipher_instance *inst)
+{
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_instance *inst;
+	struct skcipher_instance *inst;
+	struct crypto_spawn *spawn;
 	struct crypto_alg *alg;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
 	if (err)
-		return ERR_PTR(err);
+		return err;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 				  CRYPTO_ALG_TYPE_MASK);
+	err = PTR_ERR(alg);
 	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+		goto err_free_inst;
 
-	inst = ERR_PTR(-EINVAL);
-	if (!is_power_of_2(alg->cra_blocksize))
-		goto out_put_alg;
+	spawn = skcipher_instance_ctx(inst);
+	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+				CRYPTO_ALG_TYPE_MASK);
+	crypto_mod_put(alg);
+	if (err)
+		goto err_free_inst;
 
-	inst = crypto_alloc_instance("cbc", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
+	if (err)
+		goto err_drop_spawn;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
+	err = -EINVAL;
+	if (!is_power_of_2(alg->cra_blocksize))
+		goto err_drop_spawn;
+
+	inst->alg.base.cra_priority = alg->cra_priority;
+	inst->alg.base.cra_blocksize = alg->cra_blocksize;
+	inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
 	/* We access the data as u32s when xoring. */
-	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+	inst->alg.ivsize = alg->cra_blocksize;
+	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
 
-	inst->alg.cra_init = crypto_cbc_init_tfm;
-	inst->alg.cra_exit = crypto_cbc_exit_tfm;
+	inst->alg.init = crypto_cbc_init_tfm;
+	inst->alg.exit = crypto_cbc_exit_tfm;
 
-	inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
+	inst->alg.setkey = crypto_cbc_setkey;
+	inst->alg.encrypt = crypto_cbc_encrypt;
+	inst->alg.decrypt = crypto_cbc_decrypt;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+	inst->free = crypto_cbc_free;
 
-static void crypto_cbc_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_spawn(spawn);
+err_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_cbc_tmpl = {
 	.name = "cbc",
-	.alloc = crypto_cbc_alloc,
-	.free = crypto_cbc_free,
+	.create = crypto_cbc_create,
 	.module = THIS_MODULE,
 };
 
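
The cbc.c conversion above deletes the open-coded segment/in-place loops because the per-block CBC logic now lives as inline helpers in the new include/crypto/cbc.h (outside this crypto/-limited diffstat), parameterised by a single-block encrypt or decrypt callback. A rough sketch of the encrypt-side step, written under that assumption and mirroring the deleted code -- the function name here is illustrative, not the header's exact one:

#include <linux/string.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>

/* Illustrative sketch: XOR the running IV into each plaintext block,
 * encrypt it with the single-block callback, and carry the ciphertext
 * forward as the next IV. Returns the unprocessed tail length. */
static inline int example_cbc_encrypt_segment(struct skcipher_walk *walk,
                struct crypto_skcipher *tfm,
                void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_xor(iv, src, bsize);
                fn(tfm, iv, dst);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}
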
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d8575ef5c..26b924d1e582 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctr = crypto_spawn_skcipher2(&ictx->ctr);
+	ctr = crypto_spawn_skcipher(&ictx->ctr);
 	err = PTR_ERR(ctr);
 	if (IS_ERR(ctr))
 		goto err_free_cipher;
@@ -544,9 +544,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_cipher;
 
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index e899ef51dc8e..db1bc3147bc4 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
 
-	chacha = crypto_spawn_skcipher2(&ictx->chacha);
+	chacha = crypto_spawn_skcipher(&ictx->chacha);
 	if (IS_ERR(chacha)) {
 		crypto_free_ahash(poly);
 		return PTR_ERR(chacha);
@@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_poly;
 
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 39541e0e537d..94fa3551476b 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
 
 	return 0;
 }
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 7a8bfbd548f6..04080dca8f0c 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
 	unsigned long alignmask = crypto_shash_alignmask(parent);
 	struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
 	unsigned int bs = crypto_shash_blocksize(parent);
-	__be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+	__be64 *consts = PTR_ALIGN((void *)ctx->ctx,
+				   (alignmask | (__alignof__(__be64) - 1)) + 1);
 	u64 _const[2];
 	int i, err = 0;
 	u8 msb_mask, gfmask;
@@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
 	struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
 	struct crypto_cipher *tfm = tctx->child;
 	int bs = crypto_shash_blocksize(parent);
-	u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1);
+	u8 *consts = PTR_ALIGN((void *)tctx->ctx,
+			       (alignmask | (__alignof__(__be64) - 1)) + 1);
 	u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
 	u8 *prev = odds + bs;
 	unsigned int offset = 0;
@@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	case 8:
 		break;
 	default:
+		err = -EINVAL;
 		goto out_put_alg;
 	}
 
@@ -257,7 +260,8 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (err)
 		goto out_free_inst;
 
-	alignmask = alg->cra_alignmask | (sizeof(long) - 1);
+	/* We access the data as u32s when xoring. */
+	alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
 	inst->alg.base.cra_alignmask = alignmask;
 	inst->alg.base.cra_priority = alg->cra_priority;
 	inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -269,7 +273,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 			  + alg->cra_blocksize * 2;
 
 	inst->alg.base.cra_ctxsize =
-		ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1)
+		ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
+		+ ((alignmask | (__alignof__(__be64) - 1)) &
+		   ~(crypto_tfm_ctx_alignment() - 1))
 		+ alg->cra_blocksize * 2;
 
 	inst->alg.base.cra_init = cmac_init_tfm;
diff --git a/crypto/compress.c b/crypto/compress.c
index c33f0763a956..f2d522924a07 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
 
 	return 0;
 }
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0c654e59f215..0508c48a45c4 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -17,9 +17,9 @@
  *
  */
 
-#include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <linux/atomic.h>
@@ -48,6 +48,11 @@ struct cryptd_instance_ctx {
 	struct cryptd_queue *queue;
 };
 
+struct skcipherd_instance_ctx {
+	struct crypto_skcipher_spawn spawn;
+	struct cryptd_queue *queue;
+};
+
 struct hashd_instance_ctx {
 	struct crypto_shash_spawn spawn;
 	struct cryptd_queue *queue;
@@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };
 
+struct cryptd_skcipher_ctx {
+	atomic_t refcnt;
+	struct crypto_skcipher *child;
+};
+
+struct cryptd_skcipher_request_ctx {
+	crypto_completion_t complete;
+};
+
 struct cryptd_hash_ctx {
 	atomic_t refcnt;
 	struct crypto_shash *child;
@@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 {
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
-	struct crypto_tfm *tfm;
 	atomic_t *refcnt;
 	bool may_backlog;
 
@@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	if (!atomic_read(refcnt))
 		goto out_put_cpu;
 
-	tfm = request->tfm;
 	atomic_inc(refcnt);
 
 out_put_cpu:
@@ -432,6 +444,216 @@ out_put_alg:
 	return err;
 }
 
+static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+				  const u8 *key, unsigned int keylen)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
+	int err;
+
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	int refcnt = atomic_read(&ctx->refcnt);
+
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_skcipher(tfm);
+}
+
+static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+				    int err)
+{
+	struct skcipher_request *req = skcipher_request_cast(base);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
+
+	err = crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	req->base.complete = rctx->complete;
+
+out:
+	cryptd_skcipher_complete(req, err);
+}
+
+static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+				    int err)
+{
+	struct skcipher_request *req = skcipher_request_cast(base);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
+
+	err = crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	req->base.complete = rctx->complete;
+
+out:
+	cryptd_skcipher_complete(req, err);
+}
+
+static int cryptd_skcipher_enqueue(struct skcipher_request *req,
+				   crypto_completion_t compl)
+{
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_queue *queue;
+
+	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
+	rctx->complete = req->base.complete;
+	req->base.complete = compl;
+
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
+{
+	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
+}
+
+static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
+{
+	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
+}
+
+static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
+	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *cipher;
+
+	cipher = crypto_spawn_skcipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	crypto_skcipher_set_reqsize(
+		tfm, sizeof(struct cryptd_skcipher_request_ctx));
+	return 0;
+}
+
+static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_skcipher(ctx->child);
+}
+
+static void cryptd_skcipher_free(struct skcipher_instance *inst)
+{
+	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->spawn);
+}
+
+static int cryptd_create_skcipher(struct crypto_template *tmpl,
+				  struct rtattr **tb,
+				  struct cryptd_queue *queue)
+{
+	struct skcipherd_instance_ctx *ctx;
+	struct skcipher_instance *inst;
+	struct skcipher_alg *alg;
+	const char *name;
+	u32 type;
+	u32 mask;
+	int err;
+
+	type = 0;
+	mask = CRYPTO_ALG_ASYNC;
+
+	cryptd_check_internal(tb, &type, &mask);
+
+	name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	ctx = skcipher_instance_ctx(inst);
+	ctx->queue = queue;
+
+	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+	if (err)
+		goto out_free_inst;
+
+	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
+	if (err)
+		goto out_drop_skcipher;
+
+	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
+				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+
+	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
+
+	inst->alg.init = cryptd_skcipher_init_tfm;
+	inst->alg.exit = cryptd_skcipher_exit_tfm;
640 | |||
641 | inst->alg.setkey = cryptd_skcipher_setkey; | ||
642 | inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue; | ||
643 | inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue; | ||
644 | |||
645 | inst->free = cryptd_skcipher_free; | ||
646 | |||
647 | err = skcipher_register_instance(tmpl, inst); | ||
648 | if (err) { | ||
649 | out_drop_skcipher: | ||
650 | crypto_drop_skcipher(&ctx->spawn); | ||
651 | out_free_inst: | ||
652 | kfree(inst); | ||
653 | } | ||
654 | return err; | ||
655 | } | ||
656 | |||
435 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 657 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
436 | { | 658 | { |
437 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 659 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
@@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
895 | 1117 | ||
896 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 1118 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
897 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 1119 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
898 | return cryptd_create_blkcipher(tmpl, tb, &queue); | 1120 | if ((algt->type & CRYPTO_ALG_TYPE_MASK) == |
1121 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
1122 | return cryptd_create_blkcipher(tmpl, tb, &queue); | ||
1123 | |||
1124 | return cryptd_create_skcipher(tmpl, tb, &queue); | ||
899 | case CRYPTO_ALG_TYPE_DIGEST: | 1125 | case CRYPTO_ALG_TYPE_DIGEST: |
900 | return cryptd_create_hash(tmpl, tb, &queue); | 1126 | return cryptd_create_hash(tmpl, tb, &queue); |
901 | case CRYPTO_ALG_TYPE_AEAD: | 1127 | case CRYPTO_ALG_TYPE_AEAD: |
@@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | |||
985 | } | 1211 | } |
986 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 1212 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
987 | 1213 | ||
1214 | struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, | ||
1215 | u32 type, u32 mask) | ||
1216 | { | ||
1217 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
1218 | struct cryptd_skcipher_ctx *ctx; | ||
1219 | struct crypto_skcipher *tfm; | ||
1220 | |||
1221 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
1222 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
1223 | return ERR_PTR(-EINVAL); | ||
1224 | |||
1225 | tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); | ||
1226 | if (IS_ERR(tfm)) | ||
1227 | return ERR_CAST(tfm); | ||
1228 | |||
1229 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
1230 | crypto_free_skcipher(tfm); | ||
1231 | return ERR_PTR(-EINVAL); | ||
1232 | } | ||
1233 | |||
1234 | ctx = crypto_skcipher_ctx(tfm); | ||
1235 | atomic_set(&ctx->refcnt, 1); | ||
1236 | |||
1237 | return container_of(tfm, struct cryptd_skcipher, base); | ||
1238 | } | ||
1239 | EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); | ||
1240 | |||
1241 | struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) | ||
1242 | { | ||
1243 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
1244 | |||
1245 | return ctx->child; | ||
1246 | } | ||
1247 | EXPORT_SYMBOL_GPL(cryptd_skcipher_child); | ||
1248 | |||
1249 | bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) | ||
1250 | { | ||
1251 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
1252 | |||
1253 | return atomic_read(&ctx->refcnt) - 1; | ||
1254 | } | ||
1255 | EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); | ||
1256 | |||
1257 | void cryptd_free_skcipher(struct cryptd_skcipher *tfm) | ||
1258 | { | ||
1259 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
1260 | |||
1261 | if (atomic_dec_and_test(&ctx->refcnt)) | ||
1262 | crypto_free_skcipher(&tfm->base); | ||
1263 | } | ||
1264 | EXPORT_SYMBOL_GPL(cryptd_free_skcipher); | ||
1265 | |||
988 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | 1266 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, |
989 | u32 type, u32 mask) | 1267 | u32 type, u32 mask) |
990 | { | 1268 | { |
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 6989ba0046df..f1bf3418d968 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c | |||
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
47 | 47 | ||
48 | /* If another context is idling then defer */ | 48 | /* If another context is idling then defer */ |
49 | if (engine->idling) { | 49 | if (engine->idling) { |
50 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 50 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
51 | goto out; | 51 | goto out; |
52 | } | 52 | } |
53 | 53 | ||
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
58 | 58 | ||
59 | /* Only do teardown in the thread */ | 59 | /* Only do teardown in the thread */ |
60 | if (!in_kthread) { | 60 | if (!in_kthread) { |
61 | kthread_queue_work(&engine->kworker, | 61 | kthread_queue_work(engine->kworker, |
62 | &engine->pump_requests); | 62 | &engine->pump_requests); |
63 | goto out; | 63 | goto out; |
64 | } | 64 | } |
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine, | |||
189 | ret = ablkcipher_enqueue_request(&engine->queue, req); | 189 | ret = ablkcipher_enqueue_request(&engine->queue, req); |
190 | 190 | ||
191 | if (!engine->busy && need_pump) | 191 | if (!engine->busy && need_pump) |
192 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 192 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
193 | 193 | ||
194 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 194 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
195 | return ret; | 195 | return ret; |
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine, | |||
231 | ret = ahash_enqueue_request(&engine->queue, req); | 231 | ret = ahash_enqueue_request(&engine->queue, req); |
232 | 232 | ||
233 | if (!engine->busy && need_pump) | 233 | if (!engine->busy && need_pump) |
234 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 234 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
235 | 235 | ||
236 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 236 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
237 | return ret; | 237 | return ret; |
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine, | |||
284 | 284 | ||
285 | req->base.complete(&req->base, err); | 285 | req->base.complete(&req->base, err); |
286 | 286 | ||
287 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 287 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
288 | } | 288 | } |
289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); | 289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); |
290 | 290 | ||
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, | |||
321 | 321 | ||
322 | req->base.complete(&req->base, err); | 322 | req->base.complete(&req->base, err); |
323 | 323 | ||
324 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 324 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
325 | } | 325 | } |
326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); | 326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); |
327 | 327 | ||
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine) | |||
345 | engine->running = true; | 345 | engine->running = true; |
346 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 346 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
347 | 347 | ||
348 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 348 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
349 | 349 | ||
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); | 422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); |
423 | spin_lock_init(&engine->queue_lock); | 423 | spin_lock_init(&engine->queue_lock); |
424 | 424 | ||
425 | kthread_init_worker(&engine->kworker); | 425 | engine->kworker = kthread_create_worker(0, "%s", engine->name); |
426 | engine->kworker_task = kthread_run(kthread_worker_fn, | 426 | if (IS_ERR(engine->kworker)) { |
427 | &engine->kworker, "%s", | ||
428 | engine->name); | ||
429 | if (IS_ERR(engine->kworker_task)) { | ||
430 | dev_err(dev, "failed to create crypto request pump task\n"); | 427 | dev_err(dev, "failed to create crypto request pump task\n"); |
431 | return NULL; | 428 | return NULL; |
432 | } | 429 | } |
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
434 | 431 | ||
435 | if (engine->rt) { | 432 | if (engine->rt) { |
436 | dev_info(dev, "will run requests pump with realtime priority\n"); | 433 | dev_info(dev, "will run requests pump with realtime priority\n"); |
437 | sched_setscheduler(engine->kworker_task, SCHED_FIFO, ¶m); | 434 | sched_setscheduler(engine->kworker->task, SCHED_FIFO, ¶m); |
438 | } | 435 | } |
439 | 436 | ||
440 | return engine; | 437 | return engine; |
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine) | |||
455 | if (ret) | 452 | if (ret) |
456 | return ret; | 453 | return ret; |
457 | 454 | ||
458 | kthread_flush_worker(&engine->kworker); | 455 | kthread_destroy_worker(engine->kworker); |
459 | kthread_stop(engine->kworker_task); | ||
460 | 456 | ||
461 | return 0; | 457 | return 0; |
462 | } | 458 | } |
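The crypto_engine hunks above replace the open-coded pair kthread_init_worker() + kthread_run() with the managed kthread_create_worker()/kthread_destroy_worker() API, so the engine only keeps a struct kthread_worker pointer. The same pattern in isolation, with a hypothetical work item:

#include <linux/err.h>
#include <linux/kthread.h>

static void demo_pump(struct kthread_work *work)
{
	/* request-pump body would run here */
}

static int demo_worker(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	kthread_init_work(&work, demo_pump);

	/* One call creates both the worker and its backing task. */
	worker = kthread_create_worker(0, "demo-pump");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_queue_work(worker, &work);

	/* Flushes outstanding work and stops the task in one step. */
	kthread_destroy_worker(worker);
	return 0;
}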
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 1c5705481c69..a90404a0c5ff 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c | |||
@@ -112,6 +112,21 @@ nla_put_failure: | |||
112 | return -EMSGSIZE; | 112 | return -EMSGSIZE; |
113 | } | 113 | } |
114 | 114 | ||
115 | static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) | ||
116 | { | ||
117 | struct crypto_report_acomp racomp; | ||
118 | |||
119 | strncpy(racomp.type, "acomp", sizeof(racomp.type)); | ||
120 | |||
121 | if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, | ||
122 | sizeof(struct crypto_report_acomp), &racomp)) | ||
123 | goto nla_put_failure; | ||
124 | return 0; | ||
125 | |||
126 | nla_put_failure: | ||
127 | return -EMSGSIZE; | ||
128 | } | ||
129 | |||
115 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) | 130 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) |
116 | { | 131 | { |
117 | struct crypto_report_akcipher rakcipher; | 132 | struct crypto_report_akcipher rakcipher; |
@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg, | |||
186 | goto nla_put_failure; | 201 | goto nla_put_failure; |
187 | 202 | ||
188 | break; | 203 | break; |
204 | case CRYPTO_ALG_TYPE_ACOMPRESS: | ||
205 | if (crypto_report_acomp(skb, alg)) | ||
206 | goto nla_put_failure; | ||
189 | 207 | ||
208 | break; | ||
190 | case CRYPTO_ALG_TYPE_AKCIPHER: | 209 | case CRYPTO_ALG_TYPE_AKCIPHER: |
191 | if (crypto_report_akcipher(skb, alg)) | 210 | if (crypto_report_akcipher(skb, alg)) |
192 | goto nla_put_failure; | 211 | goto nla_put_failure; |
diff --git a/crypto/ctr.c b/crypto/ctr.c index ff4d21eddb83..a9a7a44f2783 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c | |||
@@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm) | |||
312 | unsigned long align; | 312 | unsigned long align; |
313 | unsigned int reqsize; | 313 | unsigned int reqsize; |
314 | 314 | ||
315 | cipher = crypto_spawn_skcipher2(spawn); | 315 | cipher = crypto_spawn_skcipher(spawn); |
316 | if (IS_ERR(cipher)) | 316 | if (IS_ERR(cipher)) |
317 | return PTR_ERR(cipher); | 317 | return PTR_ERR(cipher); |
318 | 318 | ||
@@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, | |||
370 | spawn = skcipher_instance_ctx(inst); | 370 | spawn = skcipher_instance_ctx(inst); |
371 | 371 | ||
372 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | 372 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); |
373 | err = crypto_grab_skcipher2(spawn, cipher_name, 0, | 373 | err = crypto_grab_skcipher(spawn, cipher_name, 0, |
374 | crypto_requires_sync(algt->type, | 374 | crypto_requires_sync(algt->type, |
375 | algt->mask)); | 375 | algt->mask)); |
376 | if (err) | 376 | if (err) |
377 | goto err_free_inst; | 377 | goto err_free_inst; |
378 | 378 | ||
diff --git a/crypto/cts.c b/crypto/cts.c index 51976187b2bf..00254d76b21b 100644 --- a/crypto/cts.c +++ b/crypto/cts.c | |||
@@ -290,7 +290,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm) | |||
290 | unsigned bsize; | 290 | unsigned bsize; |
291 | unsigned align; | 291 | unsigned align; |
292 | 292 | ||
293 | cipher = crypto_spawn_skcipher2(spawn); | 293 | cipher = crypto_spawn_skcipher(spawn); |
294 | if (IS_ERR(cipher)) | 294 | if (IS_ERR(cipher)) |
295 | return PTR_ERR(cipher); | 295 | return PTR_ERR(cipher); |
296 | 296 | ||
@@ -348,9 +348,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
348 | spawn = skcipher_instance_ctx(inst); | 348 | spawn = skcipher_instance_ctx(inst); |
349 | 349 | ||
350 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | 350 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); |
351 | err = crypto_grab_skcipher2(spawn, cipher_name, 0, | 351 | err = crypto_grab_skcipher(spawn, cipher_name, 0, |
352 | crypto_requires_sync(algt->type, | 352 | crypto_requires_sync(algt->type, |
353 | algt->mask)); | 353 | algt->mask)); |
354 | if (err) | 354 | if (err) |
355 | goto err_free_inst; | 355 | goto err_free_inst; |
356 | 356 | ||
diff --git a/crypto/deflate.c b/crypto/deflate.c index 95d8d37c5021..f942cb391890 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
34 | #include <linux/net.h> | 34 | #include <linux/net.h> |
35 | #include <crypto/internal/scompress.h> | ||
35 | 36 | ||
36 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION | 37 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION |
37 | #define DEFLATE_DEF_WINBITS 11 | 38 | #define DEFLATE_DEF_WINBITS 11 |
@@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) | |||
101 | vfree(ctx->decomp_stream.workspace); | 102 | vfree(ctx->decomp_stream.workspace); |
102 | } | 103 | } |
103 | 104 | ||
104 | static int deflate_init(struct crypto_tfm *tfm) | 105 | static int __deflate_init(void *ctx) |
105 | { | 106 | { |
106 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | ||
107 | int ret; | 107 | int ret; |
108 | 108 | ||
109 | ret = deflate_comp_init(ctx); | 109 | ret = deflate_comp_init(ctx); |
@@ -116,19 +116,55 @@ out: | |||
116 | return ret; | 116 | return ret; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void deflate_exit(struct crypto_tfm *tfm) | 119 | static void *deflate_alloc_ctx(struct crypto_scomp *tfm) |
120 | { | ||
121 | struct deflate_ctx *ctx; | ||
122 | int ret; | ||
123 | |||
124 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
125 | if (!ctx) | ||
126 | return ERR_PTR(-ENOMEM); | ||
127 | |||
128 | ret = __deflate_init(ctx); | ||
129 | if (ret) { | ||
130 | kfree(ctx); | ||
131 | return ERR_PTR(ret); | ||
132 | } | ||
133 | |||
134 | return ctx; | ||
135 | } | ||
136 | |||
137 | static int deflate_init(struct crypto_tfm *tfm) | ||
120 | { | 138 | { |
121 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | 139 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); |
122 | 140 | ||
141 | return __deflate_init(ctx); | ||
142 | } | ||
143 | |||
144 | static void __deflate_exit(void *ctx) | ||
145 | { | ||
123 | deflate_comp_exit(ctx); | 146 | deflate_comp_exit(ctx); |
124 | deflate_decomp_exit(ctx); | 147 | deflate_decomp_exit(ctx); |
125 | } | 148 | } |
126 | 149 | ||
127 | static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, | 150 | static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) |
128 | unsigned int slen, u8 *dst, unsigned int *dlen) | 151 | { |
152 | __deflate_exit(ctx); | ||
153 | kzfree(ctx); | ||
154 | } | ||
155 | |||
156 | static void deflate_exit(struct crypto_tfm *tfm) | ||
157 | { | ||
158 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | ||
159 | |||
160 | __deflate_exit(ctx); | ||
161 | } | ||
162 | |||
163 | static int __deflate_compress(const u8 *src, unsigned int slen, | ||
164 | u8 *dst, unsigned int *dlen, void *ctx) | ||
129 | { | 165 | { |
130 | int ret = 0; | 166 | int ret = 0; |
131 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | 167 | struct deflate_ctx *dctx = ctx; |
132 | struct z_stream_s *stream = &dctx->comp_stream; | 168 | struct z_stream_s *stream = &dctx->comp_stream; |
133 | 169 | ||
134 | ret = zlib_deflateReset(stream); | 170 | ret = zlib_deflateReset(stream); |
@@ -153,12 +189,27 @@ out: | |||
153 | return ret; | 189 | return ret; |
154 | } | 190 | } |
155 | 191 | ||
156 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | 192 | static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, |
157 | unsigned int slen, u8 *dst, unsigned int *dlen) | 193 | unsigned int slen, u8 *dst, unsigned int *dlen) |
194 | { | ||
195 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | ||
196 | |||
197 | return __deflate_compress(src, slen, dst, dlen, dctx); | ||
198 | } | ||
199 | |||
200 | static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, | ||
201 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
202 | void *ctx) | ||
203 | { | ||
204 | return __deflate_compress(src, slen, dst, dlen, ctx); | ||
205 | } | ||
206 | |||
207 | static int __deflate_decompress(const u8 *src, unsigned int slen, | ||
208 | u8 *dst, unsigned int *dlen, void *ctx) | ||
158 | { | 209 | { |
159 | 210 | ||
160 | int ret = 0; | 211 | int ret = 0; |
161 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | 212 | struct deflate_ctx *dctx = ctx; |
162 | struct z_stream_s *stream = &dctx->decomp_stream; | 213 | struct z_stream_s *stream = &dctx->decomp_stream; |
163 | 214 | ||
164 | ret = zlib_inflateReset(stream); | 215 | ret = zlib_inflateReset(stream); |
@@ -194,6 +245,21 @@ out: | |||
194 | return ret; | 245 | return ret; |
195 | } | 246 | } |
196 | 247 | ||
248 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | ||
249 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
250 | { | ||
251 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | ||
252 | |||
253 | return __deflate_decompress(src, slen, dst, dlen, dctx); | ||
254 | } | ||
255 | |||
256 | static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
257 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
258 | void *ctx) | ||
259 | { | ||
260 | return __deflate_decompress(src, slen, dst, dlen, ctx); | ||
261 | } | ||
262 | |||
197 | static struct crypto_alg alg = { | 263 | static struct crypto_alg alg = { |
198 | .cra_name = "deflate", | 264 | .cra_name = "deflate", |
199 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 265 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
@@ -206,14 +272,39 @@ static struct crypto_alg alg = { | |||
206 | .coa_decompress = deflate_decompress } } | 272 | .coa_decompress = deflate_decompress } } |
207 | }; | 273 | }; |
208 | 274 | ||
275 | static struct scomp_alg scomp = { | ||
276 | .alloc_ctx = deflate_alloc_ctx, | ||
277 | .free_ctx = deflate_free_ctx, | ||
278 | .compress = deflate_scompress, | ||
279 | .decompress = deflate_sdecompress, | ||
280 | .base = { | ||
281 | .cra_name = "deflate", | ||
282 | .cra_driver_name = "deflate-scomp", | ||
283 | .cra_module = THIS_MODULE, | ||
284 | } | ||
285 | }; | ||
286 | |||
209 | static int __init deflate_mod_init(void) | 287 | static int __init deflate_mod_init(void) |
210 | { | 288 | { |
211 | return crypto_register_alg(&alg); | 289 | int ret; |
290 | |||
291 | ret = crypto_register_alg(&alg); | ||
292 | if (ret) | ||
293 | return ret; | ||
294 | |||
295 | ret = crypto_register_scomp(&scomp); | ||
296 | if (ret) { | ||
297 | crypto_unregister_alg(&alg); | ||
298 | return ret; | ||
299 | } | ||
300 | |||
301 | return ret; | ||
212 | } | 302 | } |
213 | 303 | ||
214 | static void __exit deflate_mod_fini(void) | 304 | static void __exit deflate_mod_fini(void) |
215 | { | 305 | { |
216 | crypto_unregister_alg(&alg); | 306 | crypto_unregister_alg(&alg); |
307 | crypto_unregister_scomp(&scomp); | ||
217 | } | 308 | } |
218 | 309 | ||
219 | module_init(deflate_mod_init); | 310 | module_init(deflate_mod_init); |
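With the hunks above, deflate is exposed twice: through the legacy crypto_alg compression interface and through the new synchronous-compression (scomp) backend consumed by acomp. A stripped-down sketch of the scomp calling convention, using a hypothetical pass-through algorithm rather than a real compressor:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

/* A pass-through needs no per-request scratch memory. */
static void *copy_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;
}

static void copy_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int copy_scompress(struct crypto_scomp *tfm, const u8 *src,
			  unsigned int slen, u8 *dst, unsigned int *dlen,
			  void *ctx)
{
	if (*dlen < slen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg copy_scomp = {
	.alloc_ctx	= copy_alloc_ctx,
	.free_ctx	= copy_free_ctx,
	.compress	= copy_scompress,
	.decompress	= copy_scompress,
	.base		= {
		.cra_name	 = "copy",
		.cra_driver_name = "copy-scomp",
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered and torn down exactly like deflate above, via
 * crypto_register_scomp(&copy_scomp) / crypto_unregister_scomp(&copy_scomp). */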
diff --git a/crypto/dh.c b/crypto/dh.c index 9d19360e7189..ddcb528ab2cc 100644 --- a/crypto/dh.c +++ b/crypto/dh.c | |||
@@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req) | |||
118 | if (req->src) { | 118 | if (req->src) { |
119 | base = mpi_read_raw_from_sgl(req->src, req->src_len); | 119 | base = mpi_read_raw_from_sgl(req->src, req->src_len); |
120 | if (!base) { | 120 | if (!base) { |
121 | ret = EINVAL; | 121 | ret = -EINVAL; |
122 | goto err_free_val; | 122 | goto err_free_val; |
123 | } | 123 | } |
124 | } else { | 124 | } else { |
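The dh change above is a sign fix: in-kernel error paths return negative errno values, and a bare EINVAL is positive, so it slips past callers that only test for ret < 0. A one-function illustration of the convention the fix restores (hypothetical caller):

#include <linux/errno.h>

/* 0 means success, negative errno means failure. A positive EINVAL
 * would fall through the first branch and be treated as success. */
static int propagate(int ret)
{
	if (ret < 0)
		return ret;	/* genuine error, passed upward */
	return 0;
}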
diff --git a/crypto/drbg.c b/crypto/drbg.c index 053035b5c8f8..8a4d98b4adba 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -1782,6 +1782,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, | |||
1782 | memcpy(outbuf, drbg->outscratchpad, cryptlen); | 1782 | memcpy(outbuf, drbg->outscratchpad, cryptlen); |
1783 | 1783 | ||
1784 | outlen -= cryptlen; | 1784 | outlen -= cryptlen; |
1785 | outbuf += cryptlen; | ||
1785 | } | 1786 | } |
1786 | ret = 0; | 1787 | ret = 0; |
1787 | 1788 | ||
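The drbg hunk adds the missing destination advance in the loop that drains the CTR-DRBG scratchpad into the caller's buffer in cryptlen-sized pieces; without it, every chunk overwrites the start of outbuf. The shape of the loop, reduced to plain C with illustrative names:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy outlen bytes in chunks of at most chunk bytes. Both the
 * remaining length and the destination must advance each pass;
 * dropping the last line reproduces the bug fixed above. */
static void copy_in_chunks(u8 *outbuf, const u8 *scratch,
			   size_t outlen, size_t chunk)
{
	while (outlen) {
		size_t cryptlen = min(chunk, outlen);

		/* ...generate cryptlen bytes into scratch... */
		memcpy(outbuf, scratch, cryptlen);

		outlen -= cryptlen;
		outbuf += cryptlen;
	}
}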
diff --git a/crypto/gcm.c b/crypto/gcm.c index f624ac98c94e..b7ad808be3d4 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm) | |||
575 | if (IS_ERR(ghash)) | 575 | if (IS_ERR(ghash)) |
576 | return PTR_ERR(ghash); | 576 | return PTR_ERR(ghash); |
577 | 577 | ||
578 | ctr = crypto_spawn_skcipher2(&ictx->ctr); | 578 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
579 | err = PTR_ERR(ctr); | 579 | err = PTR_ERR(ctr); |
580 | if (IS_ERR(ctr)) | 580 | if (IS_ERR(ctr)) |
581 | goto err_free_hash; | 581 | goto err_free_hash; |
@@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, | |||
663 | goto err_drop_ghash; | 663 | goto err_drop_ghash; |
664 | 664 | ||
665 | crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); | 665 | crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); |
666 | err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0, | 666 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
667 | crypto_requires_sync(algt->type, | 667 | crypto_requires_sync(algt->type, |
668 | algt->mask)); | 668 | algt->mask)); |
669 | if (err) | 669 | if (err) |
670 | goto err_drop_ghash; | 670 | goto err_drop_ghash; |
671 | 671 | ||
672 | ctr = crypto_spawn_skcipher_alg(&ctx->ctr); | 672 | ctr = crypto_spawn_skcipher_alg(&ctx->ctr); |
673 | 673 | ||
674 | /* We only support 16-byte blocks. */ | 674 | /* We only support 16-byte blocks. */ |
675 | err = -EINVAL; | ||
675 | if (crypto_skcipher_alg_ivsize(ctr) != 16) | 676 | if (crypto_skcipher_alg_ivsize(ctr) != 16) |
676 | goto out_put_ctr; | 677 | goto out_put_ctr; |
677 | 678 | ||
678 | /* Not a stream cipher? */ | 679 | /* Not a stream cipher? */ |
679 | err = -EINVAL; | ||
680 | if (ctr->base.cra_blocksize != 1) | 680 | if (ctr->base.cra_blocksize != 1) |
681 | goto out_put_ctr; | 681 | goto out_put_ctr; |
682 | 682 | ||
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 5276607c72d0..72015fee533d 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c | |||
@@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe); | |||
263 | * t[1][BYTE] contains g*x^8*BYTE | 263 | * t[1][BYTE] contains g*x^8*BYTE |
264 | * .. | 264 | * .. |
265 | * t[15][BYTE] contains g*x^120*BYTE */ | 265 | * t[15][BYTE] contains g*x^120*BYTE */ |
266 | struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g) | ||
267 | { | ||
268 | struct gf128mul_64k *t; | ||
269 | int i, j, k; | ||
270 | |||
271 | t = kzalloc(sizeof(*t), GFP_KERNEL); | ||
272 | if (!t) | ||
273 | goto out; | ||
274 | |||
275 | for (i = 0; i < 16; i++) { | ||
276 | t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL); | ||
277 | if (!t->t[i]) { | ||
278 | gf128mul_free_64k(t); | ||
279 | t = NULL; | ||
280 | goto out; | ||
281 | } | ||
282 | } | ||
283 | |||
284 | t->t[0]->t[128] = *g; | ||
285 | for (j = 64; j > 0; j >>= 1) | ||
286 | gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]); | ||
287 | |||
288 | for (i = 0;;) { | ||
289 | for (j = 2; j < 256; j += j) | ||
290 | for (k = 1; k < j; ++k) | ||
291 | be128_xor(&t->t[i]->t[j + k], | ||
292 | &t->t[i]->t[j], &t->t[i]->t[k]); | ||
293 | |||
294 | if (++i >= 16) | ||
295 | break; | ||
296 | |||
297 | for (j = 128; j > 0; j >>= 1) { | ||
298 | t->t[i]->t[j] = t->t[i - 1]->t[j]; | ||
299 | gf128mul_x8_lle(&t->t[i]->t[j]); | ||
300 | } | ||
301 | } | ||
302 | |||
303 | out: | ||
304 | return t; | ||
305 | } | ||
306 | EXPORT_SYMBOL(gf128mul_init_64k_lle); | ||
307 | |||
308 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) | 266 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) |
309 | { | 267 | { |
310 | struct gf128mul_64k *t; | 268 | struct gf128mul_64k *t; |
@@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t) | |||
352 | int i; | 310 | int i; |
353 | 311 | ||
354 | for (i = 0; i < 16; i++) | 312 | for (i = 0; i < 16; i++) |
355 | kfree(t->t[i]); | 313 | kzfree(t->t[i]); |
356 | kfree(t); | 314 | kzfree(t); |
357 | } | 315 | } |
358 | EXPORT_SYMBOL(gf128mul_free_64k); | 316 | EXPORT_SYMBOL(gf128mul_free_64k); |
359 | 317 | ||
360 | void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t) | ||
361 | { | ||
362 | u8 *ap = (u8 *)a; | ||
363 | be128 r[1]; | ||
364 | int i; | ||
365 | |||
366 | *r = t->t[0]->t[ap[0]]; | ||
367 | for (i = 1; i < 16; ++i) | ||
368 | be128_xor(r, r, &t->t[i]->t[ap[i]]); | ||
369 | *a = *r; | ||
370 | } | ||
371 | EXPORT_SYMBOL(gf128mul_64k_lle); | ||
372 | |||
373 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) | 318 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) |
374 | { | 319 | { |
375 | u8 *ap = (u8 *)a; | 320 | u8 *ap = (u8 *)a; |
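Besides removing the now-unused *_lle 64k routines, gf128mul_free_64k() above switches from kfree() to kzfree(), wiping the key-derived multiplication tables before the memory returns to the allocator. The same pattern for any key-dependent buffer (names illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_key_table(const u8 *key, unsigned int keylen)
{
	u8 *table;

	table = kmalloc(256, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* ...derive per-key lookup data into table... */
	memcpy(table, key, min_t(unsigned int, keylen, 256));

	/* kzfree() zeroes the allocation before freeing it, so the
	 * derived material does not linger in freed slab memory. */
	kzfree(table);
	return 0;
}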
diff --git a/crypto/internal.h b/crypto/internal.h index 7eefcdb00227..f07320423191 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | |||
76 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 76 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
77 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
78 | 78 | ||
79 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | ||
80 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | ||
81 | |||
82 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); | 79 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); |
83 | void crypto_larval_kill(struct crypto_alg *alg); | 80 | void crypto_larval_kill(struct crypto_alg *alg); |
84 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); | 81 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); |
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index c4938497eedb..787dccca3715 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c | |||
@@ -39,7 +39,6 @@ | |||
39 | 39 | ||
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/module.h> | ||
43 | #include <linux/fips.h> | 42 | #include <linux/fips.h> |
44 | #include <linux/time.h> | 43 | #include <linux/time.h> |
45 | #include <linux/crypto.h> | 44 | #include <linux/crypto.h> |
diff --git a/crypto/lrw.c b/crypto/lrw.c index 6f9908a7ebcb..ecd8474018e3 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c | |||
@@ -17,7 +17,8 @@ | |||
17 | * | 17 | * |
18 | * The test vectors are included in the testing module tcrypt.[ch] */ | 18 | * The test vectors are included in the testing module tcrypt.[ch] */ |
19 | 19 | ||
20 | #include <crypto/algapi.h> | 20 | #include <crypto/internal/skcipher.h> |
21 | #include <crypto/scatterwalk.h> | ||
21 | #include <linux/err.h> | 22 | #include <linux/err.h> |
22 | #include <linux/init.h> | 23 | #include <linux/init.h> |
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
@@ -29,11 +30,30 @@ | |||
29 | #include <crypto/gf128mul.h> | 30 | #include <crypto/gf128mul.h> |
30 | #include <crypto/lrw.h> | 31 | #include <crypto/lrw.h> |
31 | 32 | ||
33 | #define LRW_BUFFER_SIZE 128u | ||
34 | |||
32 | struct priv { | 35 | struct priv { |
33 | struct crypto_cipher *child; | 36 | struct crypto_skcipher *child; |
34 | struct lrw_table_ctx table; | 37 | struct lrw_table_ctx table; |
35 | }; | 38 | }; |
36 | 39 | ||
40 | struct rctx { | ||
41 | be128 buf[LRW_BUFFER_SIZE / sizeof(be128)]; | ||
42 | |||
43 | be128 t; | ||
44 | |||
45 | be128 *ext; | ||
46 | |||
47 | struct scatterlist srcbuf[2]; | ||
48 | struct scatterlist dstbuf[2]; | ||
49 | struct scatterlist *src; | ||
50 | struct scatterlist *dst; | ||
51 | |||
52 | unsigned int left; | ||
53 | |||
54 | struct skcipher_request subreq; | ||
55 | }; | ||
56 | |||
37 | static inline void setbit128_bbe(void *b, int bit) | 57 | static inline void setbit128_bbe(void *b, int bit) |
38 | { | 58 | { |
39 | __set_bit(bit ^ (0x80 - | 59 | __set_bit(bit ^ (0x80 - |
@@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx) | |||
76 | } | 96 | } |
77 | EXPORT_SYMBOL_GPL(lrw_free_table); | 97 | EXPORT_SYMBOL_GPL(lrw_free_table); |
78 | 98 | ||
79 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 99 | static int setkey(struct crypto_skcipher *parent, const u8 *key, |
80 | unsigned int keylen) | 100 | unsigned int keylen) |
81 | { | 101 | { |
82 | struct priv *ctx = crypto_tfm_ctx(parent); | 102 | struct priv *ctx = crypto_skcipher_ctx(parent); |
83 | struct crypto_cipher *child = ctx->child; | 103 | struct crypto_skcipher *child = ctx->child; |
84 | int err, bsize = LRW_BLOCK_SIZE; | 104 | int err, bsize = LRW_BLOCK_SIZE; |
85 | const u8 *tweak = key + keylen - bsize; | 105 | const u8 *tweak = key + keylen - bsize; |
86 | 106 | ||
87 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 107 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
88 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 108 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
89 | CRYPTO_TFM_REQ_MASK); | 109 | CRYPTO_TFM_REQ_MASK); |
90 | err = crypto_cipher_setkey(child, key, keylen - bsize); | 110 | err = crypto_skcipher_setkey(child, key, keylen - bsize); |
111 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | ||
112 | CRYPTO_TFM_RES_MASK); | ||
91 | if (err) | 113 | if (err) |
92 | return err; | 114 | return err; |
93 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | ||
94 | CRYPTO_TFM_RES_MASK); | ||
95 | 115 | ||
96 | return lrw_init_table(&ctx->table, tweak); | 116 | return lrw_init_table(&ctx->table, tweak); |
97 | } | 117 | } |
98 | 118 | ||
99 | struct sinfo { | ||
100 | be128 t; | ||
101 | struct crypto_tfm *tfm; | ||
102 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | ||
103 | }; | ||
104 | |||
105 | static inline void inc(be128 *iv) | 119 | static inline void inc(be128 *iv) |
106 | { | 120 | { |
107 | be64_add_cpu(&iv->b, 1); | 121 | be64_add_cpu(&iv->b, 1); |
@@ -109,13 +123,6 @@ static inline void inc(be128 *iv) | |||
109 | be64_add_cpu(&iv->a, 1); | 123 | be64_add_cpu(&iv->a, 1); |
110 | } | 124 | } |
111 | 125 | ||
112 | static inline void lrw_round(struct sinfo *s, void *dst, const void *src) | ||
113 | { | ||
114 | be128_xor(dst, &s->t, src); /* PP <- T xor P */ | ||
115 | s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ | ||
116 | be128_xor(dst, dst, &s->t); /* C <- T xor CC */ | ||
117 | } | ||
118 | |||
119 | /* this returns the number of consequative 1 bits starting | 126 | /* this returns the number of consequative 1 bits starting |
120 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ | 127 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ |
121 | static inline int get_index128(be128 *block) | 128 | static inline int get_index128(be128 *block) |
@@ -135,83 +142,263 @@ static inline int get_index128(be128 *block) | |||
135 | return x; | 142 | return x; |
136 | } | 143 | } |
137 | 144 | ||
138 | static int crypt(struct blkcipher_desc *d, | 145 | static int post_crypt(struct skcipher_request *req) |
139 | struct blkcipher_walk *w, struct priv *ctx, | ||
140 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) | ||
141 | { | 146 | { |
147 | struct rctx *rctx = skcipher_request_ctx(req); | ||
148 | be128 *buf = rctx->ext ?: rctx->buf; | ||
149 | struct skcipher_request *subreq; | ||
150 | const int bs = LRW_BLOCK_SIZE; | ||
151 | struct skcipher_walk w; | ||
152 | struct scatterlist *sg; | ||
153 | unsigned offset; | ||
142 | int err; | 154 | int err; |
143 | unsigned int avail; | 155 | |
156 | subreq = &rctx->subreq; | ||
157 | err = skcipher_walk_virt(&w, subreq, false); | ||
158 | |||
159 | while (w.nbytes) { | ||
160 | unsigned int avail = w.nbytes; | ||
161 | be128 *wdst; | ||
162 | |||
163 | wdst = w.dst.virt.addr; | ||
164 | |||
165 | do { | ||
166 | be128_xor(wdst, buf++, wdst); | ||
167 | wdst++; | ||
168 | } while ((avail -= bs) >= bs); | ||
169 | |||
170 | err = skcipher_walk_done(&w, avail); | ||
171 | } | ||
172 | |||
173 | rctx->left -= subreq->cryptlen; | ||
174 | |||
175 | if (err || !rctx->left) | ||
176 | goto out; | ||
177 | |||
178 | rctx->dst = rctx->dstbuf; | ||
179 | |||
180 | scatterwalk_done(&w.out, 0, 1); | ||
181 | sg = w.out.sg; | ||
182 | offset = w.out.offset; | ||
183 | |||
184 | if (rctx->dst != sg) { | ||
185 | rctx->dst[0] = *sg; | ||
186 | sg_unmark_end(rctx->dst); | ||
187 | scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); | ||
188 | } | ||
189 | rctx->dst[0].length -= offset - sg->offset; | ||
190 | rctx->dst[0].offset = offset; | ||
191 | |||
192 | out: | ||
193 | return err; | ||
194 | } | ||
195 | |||
196 | static int pre_crypt(struct skcipher_request *req) | ||
197 | { | ||
198 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
199 | struct rctx *rctx = skcipher_request_ctx(req); | ||
200 | struct priv *ctx = crypto_skcipher_ctx(tfm); | ||
201 | be128 *buf = rctx->ext ?: rctx->buf; | ||
202 | struct skcipher_request *subreq; | ||
144 | const int bs = LRW_BLOCK_SIZE; | 203 | const int bs = LRW_BLOCK_SIZE; |
145 | struct sinfo s = { | 204 | struct skcipher_walk w; |
146 | .tfm = crypto_cipher_tfm(ctx->child), | 205 | struct scatterlist *sg; |
147 | .fn = fn | 206 | unsigned cryptlen; |
148 | }; | 207 | unsigned offset; |
149 | be128 *iv; | 208 | be128 *iv; |
150 | u8 *wsrc; | 209 | bool more; |
151 | u8 *wdst; | 210 | int err; |
152 | 211 | ||
153 | err = blkcipher_walk_virt(d, w); | 212 | subreq = &rctx->subreq; |
154 | if (!(avail = w->nbytes)) | 213 | skcipher_request_set_tfm(subreq, tfm); |
155 | return err; | ||
156 | 214 | ||
157 | wsrc = w->src.virt.addr; | 215 | cryptlen = subreq->cryptlen; |
158 | wdst = w->dst.virt.addr; | 216 | more = rctx->left > cryptlen; |
217 | if (!more) | ||
218 | cryptlen = rctx->left; | ||
159 | 219 | ||
160 | /* calculate first value of T */ | 220 | skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
161 | iv = (be128 *)w->iv; | 221 | cryptlen, req->iv); |
162 | s.t = *iv; | ||
163 | 222 | ||
164 | /* T <- I*Key2 */ | 223 | err = skcipher_walk_virt(&w, subreq, false); |
165 | gf128mul_64k_bbe(&s.t, ctx->table.table); | 224 | iv = w.iv; |
166 | 225 | ||
167 | goto first; | 226 | while (w.nbytes) { |
227 | unsigned int avail = w.nbytes; | ||
228 | be128 *wsrc; | ||
229 | be128 *wdst; | ||
230 | |||
231 | wsrc = w.src.virt.addr; | ||
232 | wdst = w.dst.virt.addr; | ||
168 | 233 | ||
169 | for (;;) { | ||
170 | do { | 234 | do { |
235 | *buf++ = rctx->t; | ||
236 | be128_xor(wdst++, &rctx->t, wsrc++); | ||
237 | |||
171 | /* T <- I*Key2, using the optimization | 238 | /* T <- I*Key2, using the optimization |
172 | * discussed in the specification */ | 239 | * discussed in the specification */ |
173 | be128_xor(&s.t, &s.t, | 240 | be128_xor(&rctx->t, &rctx->t, |
174 | &ctx->table.mulinc[get_index128(iv)]); | 241 | &ctx->table.mulinc[get_index128(iv)]); |
175 | inc(iv); | 242 | inc(iv); |
243 | } while ((avail -= bs) >= bs); | ||
176 | 244 | ||
177 | first: | 245 | err = skcipher_walk_done(&w, avail); |
178 | lrw_round(&s, wdst, wsrc); | 246 | } |
179 | 247 | ||
180 | wsrc += bs; | 248 | skcipher_request_set_tfm(subreq, ctx->child); |
181 | wdst += bs; | 249 | skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, |
182 | } while ((avail -= bs) >= bs); | 250 | cryptlen, NULL); |
183 | 251 | ||
184 | err = blkcipher_walk_done(d, w, avail); | 252 | if (err || !more) |
185 | if (!(avail = w->nbytes)) | 253 | goto out; |
186 | break; | 254 | |
255 | rctx->src = rctx->srcbuf; | ||
256 | |||
257 | scatterwalk_done(&w.in, 0, 1); | ||
258 | sg = w.in.sg; | ||
259 | offset = w.in.offset; | ||
260 | |||
261 | if (rctx->src != sg) { | ||
262 | rctx->src[0] = *sg; | ||
263 | sg_unmark_end(rctx->src); | ||
264 | scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); | ||
265 | } | ||
266 | rctx->src[0].length -= offset - sg->offset; | ||
267 | rctx->src[0].offset = offset; | ||
268 | |||
269 | out: | ||
270 | return err; | ||
271 | } | ||
272 | |||
273 | static int init_crypt(struct skcipher_request *req, crypto_completion_t done) | ||
274 | { | ||
275 | struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); | ||
276 | struct rctx *rctx = skcipher_request_ctx(req); | ||
277 | struct skcipher_request *subreq; | ||
278 | gfp_t gfp; | ||
279 | |||
280 | subreq = &rctx->subreq; | ||
281 | skcipher_request_set_callback(subreq, req->base.flags, done, req); | ||
282 | |||
283 | gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
284 | GFP_ATOMIC; | ||
285 | rctx->ext = NULL; | ||
286 | |||
287 | subreq->cryptlen = LRW_BUFFER_SIZE; | ||
288 | if (req->cryptlen > LRW_BUFFER_SIZE) { | ||
289 | subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); | ||
290 | rctx->ext = kmalloc(subreq->cryptlen, gfp); | ||
291 | } | ||
292 | |||
293 | rctx->src = req->src; | ||
294 | rctx->dst = req->dst; | ||
295 | rctx->left = req->cryptlen; | ||
296 | |||
297 | /* calculate first value of T */ | ||
298 | memcpy(&rctx->t, req->iv, sizeof(rctx->t)); | ||
299 | |||
300 | /* T <- I*Key2 */ | ||
301 | gf128mul_64k_bbe(&rctx->t, ctx->table.table); | ||
187 | 302 | ||
188 | wsrc = w->src.virt.addr; | 303 | return 0; |
189 | wdst = w->dst.virt.addr; | 304 | } |
305 | |||
306 | static void exit_crypt(struct skcipher_request *req) | ||
307 | { | ||
308 | struct rctx *rctx = skcipher_request_ctx(req); | ||
309 | |||
310 | rctx->left = 0; | ||
311 | |||
312 | if (rctx->ext) | ||
313 | kfree(rctx->ext); | ||
314 | } | ||
315 | |||
316 | static int do_encrypt(struct skcipher_request *req, int err) | ||
317 | { | ||
318 | struct rctx *rctx = skcipher_request_ctx(req); | ||
319 | struct skcipher_request *subreq; | ||
320 | |||
321 | subreq = &rctx->subreq; | ||
322 | |||
323 | while (!err && rctx->left) { | ||
324 | err = pre_crypt(req) ?: | ||
325 | crypto_skcipher_encrypt(subreq) ?: | ||
326 | post_crypt(req); | ||
327 | |||
328 | if (err == -EINPROGRESS || | ||
329 | (err == -EBUSY && | ||
330 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
331 | return err; | ||
190 | } | 332 | } |
191 | 333 | ||
334 | exit_crypt(req); | ||
192 | return err; | 335 | return err; |
193 | } | 336 | } |
194 | 337 | ||
195 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 338 | static void encrypt_done(struct crypto_async_request *areq, int err) |
196 | struct scatterlist *src, unsigned int nbytes) | 339 | { |
340 | struct skcipher_request *req = areq->data; | ||
341 | struct skcipher_request *subreq; | ||
342 | struct rctx *rctx; | ||
343 | |||
344 | rctx = skcipher_request_ctx(req); | ||
345 | subreq = &rctx->subreq; | ||
346 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
347 | |||
348 | err = do_encrypt(req, err ?: post_crypt(req)); | ||
349 | if (rctx->left) | ||
350 | return; | ||
351 | |||
352 | skcipher_request_complete(req, err); | ||
353 | } | ||
354 | |||
355 | static int encrypt(struct skcipher_request *req) | ||
356 | { | ||
357 | return do_encrypt(req, init_crypt(req, encrypt_done)); | ||
358 | } | ||
359 | |||
360 | static int do_decrypt(struct skcipher_request *req, int err) | ||
197 | { | 361 | { |
198 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 362 | struct rctx *rctx = skcipher_request_ctx(req); |
199 | struct blkcipher_walk w; | 363 | struct skcipher_request *subreq; |
364 | |||
365 | subreq = &rctx->subreq; | ||
366 | |||
367 | while (!err && rctx->left) { | ||
368 | err = pre_crypt(req) ?: | ||
369 | crypto_skcipher_decrypt(subreq) ?: | ||
370 | post_crypt(req); | ||
371 | |||
372 | if (err == -EINPROGRESS || | ||
373 | (err == -EBUSY && | ||
374 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
375 | return err; | ||
376 | } | ||
200 | 377 | ||
201 | blkcipher_walk_init(&w, dst, src, nbytes); | 378 | exit_crypt(req); |
202 | return crypt(desc, &w, ctx, | 379 | return err; |
203 | crypto_cipher_alg(ctx->child)->cia_encrypt); | ||
204 | } | 380 | } |
205 | 381 | ||
206 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 382 | static void decrypt_done(struct crypto_async_request *areq, int err) |
207 | struct scatterlist *src, unsigned int nbytes) | ||
208 | { | 383 | { |
209 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 384 | struct skcipher_request *req = areq->data; |
210 | struct blkcipher_walk w; | 385 | struct skcipher_request *subreq; |
386 | struct rctx *rctx; | ||
387 | |||
388 | rctx = skcipher_request_ctx(req); | ||
389 | subreq = &rctx->subreq; | ||
390 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
391 | |||
392 | err = do_decrypt(req, err ?: post_crypt(req)); | ||
393 | if (rctx->left) | ||
394 | return; | ||
211 | 395 | ||
212 | blkcipher_walk_init(&w, dst, src, nbytes); | 396 | skcipher_request_complete(req, err); |
213 | return crypt(desc, &w, ctx, | 397 | } |
214 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 398 | |
399 | static int decrypt(struct skcipher_request *req) | ||
400 | { | ||
401 | return do_decrypt(req, init_crypt(req, decrypt_done)); | ||
215 | } | 402 | } |
216 | 403 | ||
217 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, | 404 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
@@ -293,95 +480,161 @@ first: | |||
293 | } | 480 | } |
294 | EXPORT_SYMBOL_GPL(lrw_crypt); | 481 | EXPORT_SYMBOL_GPL(lrw_crypt); |
295 | 482 | ||
296 | static int init_tfm(struct crypto_tfm *tfm) | 483 | static int init_tfm(struct crypto_skcipher *tfm) |
297 | { | 484 | { |
298 | struct crypto_cipher *cipher; | 485 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
299 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 486 | struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); |
300 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 487 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
301 | struct priv *ctx = crypto_tfm_ctx(tfm); | 488 | struct crypto_skcipher *cipher; |
302 | u32 *flags = &tfm->crt_flags; | ||
303 | 489 | ||
304 | cipher = crypto_spawn_cipher(spawn); | 490 | cipher = crypto_spawn_skcipher(spawn); |
305 | if (IS_ERR(cipher)) | 491 | if (IS_ERR(cipher)) |
306 | return PTR_ERR(cipher); | 492 | return PTR_ERR(cipher); |
307 | 493 | ||
308 | if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { | ||
309 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
310 | crypto_free_cipher(cipher); | ||
311 | return -EINVAL; | ||
312 | } | ||
313 | |||
314 | ctx->child = cipher; | 494 | ctx->child = cipher; |
495 | |||
496 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + | ||
497 | sizeof(struct rctx)); | ||
498 | |||
315 | return 0; | 499 | return 0; |
316 | } | 500 | } |
317 | 501 | ||
318 | static void exit_tfm(struct crypto_tfm *tfm) | 502 | static void exit_tfm(struct crypto_skcipher *tfm) |
319 | { | 503 | { |
320 | struct priv *ctx = crypto_tfm_ctx(tfm); | 504 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
321 | 505 | ||
322 | lrw_free_table(&ctx->table); | 506 | lrw_free_table(&ctx->table); |
323 | crypto_free_cipher(ctx->child); | 507 | crypto_free_skcipher(ctx->child); |
508 | } | ||
509 | |||
510 | static void free(struct skcipher_instance *inst) | ||
511 | { | ||
512 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
513 | kfree(inst); | ||
324 | } | 514 | } |
325 | 515 | ||
326 | static struct crypto_instance *alloc(struct rtattr **tb) | 516 | static int create(struct crypto_template *tmpl, struct rtattr **tb) |
327 | { | 517 | { |
328 | struct crypto_instance *inst; | 518 | struct crypto_skcipher_spawn *spawn; |
329 | struct crypto_alg *alg; | 519 | struct skcipher_instance *inst; |
520 | struct crypto_attr_type *algt; | ||
521 | struct skcipher_alg *alg; | ||
522 | const char *cipher_name; | ||
523 | char ecb_name[CRYPTO_MAX_ALG_NAME]; | ||
330 | int err; | 524 | int err; |
331 | 525 | ||
332 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 526 | algt = crypto_get_attr_type(tb); |
527 | if (IS_ERR(algt)) | ||
528 | return PTR_ERR(algt); | ||
529 | |||
530 | if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) | ||
531 | return -EINVAL; | ||
532 | |||
533 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
534 | if (IS_ERR(cipher_name)) | ||
535 | return PTR_ERR(cipher_name); | ||
536 | |||
537 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | ||
538 | if (!inst) | ||
539 | return -ENOMEM; | ||
540 | |||
541 | spawn = skcipher_instance_ctx(inst); | ||
542 | |||
543 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | ||
544 | err = crypto_grab_skcipher(spawn, cipher_name, 0, | ||
545 | crypto_requires_sync(algt->type, | ||
546 | algt->mask)); | ||
547 | if (err == -ENOENT) { | ||
548 | err = -ENAMETOOLONG; | ||
549 | if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", | ||
550 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | ||
551 | goto err_free_inst; | ||
552 | |||
553 | err = crypto_grab_skcipher(spawn, ecb_name, 0, | ||
554 | crypto_requires_sync(algt->type, | ||
555 | algt->mask)); | ||
556 | } | ||
557 | |||
333 | if (err) | 558 | if (err) |
334 | return ERR_PTR(err); | 559 | goto err_free_inst; |
335 | 560 | ||
336 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 561 | alg = crypto_skcipher_spawn_alg(spawn); |
337 | CRYPTO_ALG_TYPE_MASK); | ||
338 | if (IS_ERR(alg)) | ||
339 | return ERR_CAST(alg); | ||
340 | 562 | ||
341 | inst = crypto_alloc_instance("lrw", alg); | 563 | err = -EINVAL; |
342 | if (IS_ERR(inst)) | 564 | if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) |
343 | goto out_put_alg; | 565 | goto err_drop_spawn; |
344 | 566 | ||
345 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 567 | if (crypto_skcipher_alg_ivsize(alg)) |
346 | inst->alg.cra_priority = alg->cra_priority; | 568 | goto err_drop_spawn; |
347 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
348 | 569 | ||
349 | if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; | 570 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", |
350 | else inst->alg.cra_alignmask = alg->cra_alignmask; | 571 | &alg->base); |
351 | inst->alg.cra_type = &crypto_blkcipher_type; | 572 | if (err) |
573 | goto err_drop_spawn; | ||
352 | 574 | ||
353 | if (!(alg->cra_blocksize % 4)) | 575 | err = -EINVAL; |
354 | inst->alg.cra_alignmask |= 3; | 576 | cipher_name = alg->base.cra_name; |
355 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | ||
356 | inst->alg.cra_blkcipher.min_keysize = | ||
357 | alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; | ||
358 | inst->alg.cra_blkcipher.max_keysize = | ||
359 | alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; | ||
360 | 577 | ||
361 | inst->alg.cra_ctxsize = sizeof(struct priv); | 578 | /* Alas we screwed up the naming so we have to mangle the |
579 | * cipher name. | ||
580 | */ | ||
581 | if (!strncmp(cipher_name, "ecb(", 4)) { | ||
582 | unsigned len; | ||
362 | 583 | ||
363 | inst->alg.cra_init = init_tfm; | 584 | len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); |
364 | inst->alg.cra_exit = exit_tfm; | 585 | if (len < 2 || len >= sizeof(ecb_name)) |
586 | goto err_drop_spawn; | ||
365 | 587 | ||
366 | inst->alg.cra_blkcipher.setkey = setkey; | 588 | if (ecb_name[len - 1] != ')') |
367 | inst->alg.cra_blkcipher.encrypt = encrypt; | 589 | goto err_drop_spawn; |
368 | inst->alg.cra_blkcipher.decrypt = decrypt; | ||
369 | 590 | ||
370 | out_put_alg: | 591 | ecb_name[len - 1] = 0; |
371 | crypto_mod_put(alg); | ||
372 | return inst; | ||
373 | } | ||
374 | 592 | ||
375 | static void free(struct crypto_instance *inst) | 593 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
376 | { | 594 | "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) |
377 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 595 | return -ENAMETOOLONG; |
596 | } | ||
597 | |||
598 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; | ||
599 | inst->alg.base.cra_priority = alg->base.cra_priority; | ||
600 | inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; | ||
601 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | ||
602 | (__alignof__(u64) - 1); | ||
603 | |||
604 | inst->alg.ivsize = LRW_BLOCK_SIZE; | ||
605 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + | ||
606 | LRW_BLOCK_SIZE; | ||
607 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + | ||
608 | LRW_BLOCK_SIZE; | ||
609 | |||
610 | inst->alg.base.cra_ctxsize = sizeof(struct priv); | ||
611 | |||
612 | inst->alg.init = init_tfm; | ||
613 | inst->alg.exit = exit_tfm; | ||
614 | |||
615 | inst->alg.setkey = setkey; | ||
616 | inst->alg.encrypt = encrypt; | ||
617 | inst->alg.decrypt = decrypt; | ||
618 | |||
619 | inst->free = free; | ||
620 | |||
621 | err = skcipher_register_instance(tmpl, inst); | ||
622 | if (err) | ||
623 | goto err_drop_spawn; | ||
624 | |||
625 | out: | ||
626 | return err; | ||
627 | |||
628 | err_drop_spawn: | ||
629 | crypto_drop_skcipher(spawn); | ||
630 | err_free_inst: | ||
378 | kfree(inst); | 631 | kfree(inst); |
632 | goto out; | ||
379 | } | 633 | } |
380 | 634 | ||
381 | static struct crypto_template crypto_tmpl = { | 635 | static struct crypto_template crypto_tmpl = { |
382 | .name = "lrw", | 636 | .name = "lrw", |
383 | .alloc = alloc, | 637 | .create = create, |
384 | .free = free, | ||
385 | .module = THIS_MODULE, | 638 | .module = THIS_MODULE, |
386 | }; | 639 | }; |
387 | 640 | ||
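The rewrite above turns lrw from a blkcipher into an skcipher template, so it is reached through the request-based skcipher API and can sit on top of an asynchronous ecb implementation. A minimal synchronous calling sketch; the algorithm name, buffer handling and on-stack request are assumptions for illustration, and len must be a multiple of the 16-byte LRW block:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int demo_lrw_encrypt(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen, u8 iv[16])
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* Masking out CRYPTO_ALG_ASYNC requests a synchronous instance. */
	tfm = crypto_alloc_skcipher("lrw(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* For lrw(aes) the key is the AES key followed by the 16-byte tweak key. */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
out:
	crypto_free_skcipher(tfm);
	return err;
}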
diff --git a/crypto/lz4.c b/crypto/lz4.c index aefbceaf3104..99c1b2cc2976 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c | |||
@@ -23,36 +23,53 @@ | |||
23 | #include <linux/crypto.h> | 23 | #include <linux/crypto.h> |
24 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
25 | #include <linux/lz4.h> | 25 | #include <linux/lz4.h> |
26 | #include <crypto/internal/scompress.h> | ||
26 | 27 | ||
27 | struct lz4_ctx { | 28 | struct lz4_ctx { |
28 | void *lz4_comp_mem; | 29 | void *lz4_comp_mem; |
29 | }; | 30 | }; |
30 | 31 | ||
32 | static void *lz4_alloc_ctx(struct crypto_scomp *tfm) | ||
33 | { | ||
34 | void *ctx; | ||
35 | |||
36 | ctx = vmalloc(LZ4_MEM_COMPRESS); | ||
37 | if (!ctx) | ||
38 | return ERR_PTR(-ENOMEM); | ||
39 | |||
40 | return ctx; | ||
41 | } | ||
42 | |||
31 | static int lz4_init(struct crypto_tfm *tfm) | 43 | static int lz4_init(struct crypto_tfm *tfm) |
32 | { | 44 | { |
33 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | 45 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); |
34 | 46 | ||
35 | ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); | 47 | ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); |
36 | if (!ctx->lz4_comp_mem) | 48 | if (IS_ERR(ctx->lz4_comp_mem)) |
37 | return -ENOMEM; | 49 | return -ENOMEM; |
38 | 50 | ||
39 | return 0; | 51 | return 0; |
40 | } | 52 | } |
41 | 53 | ||
54 | static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
55 | { | ||
56 | vfree(ctx); | ||
57 | } | ||
58 | |||
42 | static void lz4_exit(struct crypto_tfm *tfm) | 59 | static void lz4_exit(struct crypto_tfm *tfm) |
43 | { | 60 | { |
44 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | 61 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); |
45 | vfree(ctx->lz4_comp_mem); | 62 | |
63 | lz4_free_ctx(NULL, ctx->lz4_comp_mem); | ||
46 | } | 64 | } |
47 | 65 | ||
48 | static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | 66 | static int __lz4_compress_crypto(const u8 *src, unsigned int slen, |
49 | unsigned int slen, u8 *dst, unsigned int *dlen) | 67 | u8 *dst, unsigned int *dlen, void *ctx) |
50 | { | 68 | { |
51 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
52 | size_t tmp_len = *dlen; | 69 | size_t tmp_len = *dlen; |
53 | int err; | 70 | int err; |
54 | 71 | ||
55 | err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem); | 72 | err = lz4_compress(src, slen, dst, &tmp_len, ctx); |
56 | 73 | ||
57 | if (err < 0) | 74 | if (err < 0) |
58 | return -EINVAL; | 75 | return -EINVAL; |
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
61 | return 0; | 78 | return 0; |
62 | } | 79 | } |
63 | 80 | ||
64 | static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | 81 | static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, |
65 | unsigned int slen, u8 *dst, unsigned int *dlen) | 82 | unsigned int slen, u8 *dst, unsigned int *dlen, |
83 | void *ctx) | ||
84 | { | ||
85 | return __lz4_compress_crypto(src, slen, dst, dlen, ctx); | ||
86 | } | ||
87 | |||
88 | static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
89 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
90 | { | ||
91 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
92 | |||
93 | return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); | ||
94 | } | ||
95 | |||
96 | static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, | ||
97 | u8 *dst, unsigned int *dlen, void *ctx) | ||
66 | { | 98 | { |
67 | int err; | 99 | int err; |
68 | size_t tmp_len = *dlen; | 100 | size_t tmp_len = *dlen; |
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
76 | return err; | 108 | return err; |
77 | } | 109 | } |
78 | 110 | ||
111 | static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
112 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
113 | void *ctx) | ||
114 | { | ||
115 | return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); | ||
116 | } | ||
117 | |||
118 | static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
119 | unsigned int slen, u8 *dst, | ||
120 | unsigned int *dlen) | ||
121 | { | ||
122 | return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); | ||
123 | } | ||
124 | |||
79 | static struct crypto_alg alg_lz4 = { | 125 | static struct crypto_alg alg_lz4 = { |
80 | .cra_name = "lz4", | 126 | .cra_name = "lz4", |
81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 127 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
@@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = { | |||
89 | .coa_decompress = lz4_decompress_crypto } } | 135 | .coa_decompress = lz4_decompress_crypto } } |
90 | }; | 136 | }; |
91 | 137 | ||
138 | static struct scomp_alg scomp = { | ||
139 | .alloc_ctx = lz4_alloc_ctx, | ||
140 | .free_ctx = lz4_free_ctx, | ||
141 | .compress = lz4_scompress, | ||
142 | .decompress = lz4_sdecompress, | ||
143 | .base = { | ||
144 | .cra_name = "lz4", | ||
145 | .cra_driver_name = "lz4-scomp", | ||
146 | .cra_module = THIS_MODULE, | ||
147 | } | ||
148 | }; | ||
149 | |||
92 | static int __init lz4_mod_init(void) | 150 | static int __init lz4_mod_init(void) |
93 | { | 151 | { |
94 | return crypto_register_alg(&alg_lz4); | 152 | int ret; |
153 | |||
154 | ret = crypto_register_alg(&alg_lz4); | ||
155 | if (ret) | ||
156 | return ret; | ||
157 | |||
158 | ret = crypto_register_scomp(&scomp); | ||
159 | if (ret) { | ||
160 | crypto_unregister_alg(&alg_lz4); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | return ret; | ||
95 | } | 165 | } |
96 | 166 | ||
97 | static void __exit lz4_mod_fini(void) | 167 | static void __exit lz4_mod_fini(void) |
98 | { | 168 | { |
99 | crypto_unregister_alg(&alg_lz4); | 169 | crypto_unregister_alg(&alg_lz4); |
170 | crypto_unregister_scomp(&scomp); | ||
100 | } | 171 | } |
101 | 172 | ||
102 | module_init(lz4_mod_init); | 173 | module_init(lz4_mod_init); |
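With the scomp registration above, the lz4 backend also becomes reachable through the acomp interface. A minimal caller sketch (not part of this patch; buffers, lengths and the assumption of synchronous completion are illustrative):

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int lz4_acomp_demo(struct scatterlist *src, unsigned int slen,
			  struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	/* No callback: assumes the backend completes synchronously, as the
	 * scomp-backed "lz4" implementation does.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_acomp_compress(req);	/* on success req->dlen holds the output size */

	acomp_request_free(req);
out:
	crypto_free_acomp(tfm);
	return err;
}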
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index a1d3b5bd3d85..75ffc4a3f786 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c | |||
@@ -22,37 +22,53 @@ | |||
22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/lz4.h> | 24 | #include <linux/lz4.h> |
25 | #include <crypto/internal/scompress.h> | ||
25 | 26 | ||
26 | struct lz4hc_ctx { | 27 | struct lz4hc_ctx { |
27 | void *lz4hc_comp_mem; | 28 | void *lz4hc_comp_mem; |
28 | }; | 29 | }; |
29 | 30 | ||
31 | static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) | ||
32 | { | ||
33 | void *ctx; | ||
34 | |||
35 | ctx = vmalloc(LZ4HC_MEM_COMPRESS); | ||
36 | if (!ctx) | ||
37 | return ERR_PTR(-ENOMEM); | ||
38 | |||
39 | return ctx; | ||
40 | } | ||
41 | |||
30 | static int lz4hc_init(struct crypto_tfm *tfm) | 42 | static int lz4hc_init(struct crypto_tfm *tfm) |
31 | { | 43 | { |
32 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | 44 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); |
33 | 45 | ||
34 | ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS); | 46 | ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); |
35 | if (!ctx->lz4hc_comp_mem) | 47 | if (IS_ERR(ctx->lz4hc_comp_mem)) |
36 | return -ENOMEM; | 48 | return -ENOMEM; |
37 | 49 | ||
38 | return 0; | 50 | return 0; |
39 | } | 51 | } |
40 | 52 | ||
53 | static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
54 | { | ||
55 | vfree(ctx); | ||
56 | } | ||
57 | |||
41 | static void lz4hc_exit(struct crypto_tfm *tfm) | 58 | static void lz4hc_exit(struct crypto_tfm *tfm) |
42 | { | 59 | { |
43 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | 60 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); |
44 | 61 | ||
45 | vfree(ctx->lz4hc_comp_mem); | 62 | lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); |
46 | } | 63 | } |
47 | 64 | ||
48 | static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | 65 | static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, |
49 | unsigned int slen, u8 *dst, unsigned int *dlen) | 66 | u8 *dst, unsigned int *dlen, void *ctx) |
50 | { | 67 | { |
51 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
52 | size_t tmp_len = *dlen; | 68 | size_t tmp_len = *dlen; |
53 | int err; | 69 | int err; |
54 | 70 | ||
55 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem); | 71 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx); |
56 | 72 | ||
57 | if (err < 0) | 73 | if (err < 0) |
58 | return -EINVAL; | 74 | return -EINVAL; |
@@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
61 | return 0; | 77 | return 0; |
62 | } | 78 | } |
63 | 79 | ||
64 | static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | 80 | static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, |
65 | unsigned int slen, u8 *dst, unsigned int *dlen) | 81 | unsigned int slen, u8 *dst, unsigned int *dlen, |
82 | void *ctx) | ||
83 | { | ||
84 | return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); | ||
85 | } | ||
86 | |||
87 | static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
88 | unsigned int slen, u8 *dst, | ||
89 | unsigned int *dlen) | ||
90 | { | ||
91 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
92 | |||
93 | return __lz4hc_compress_crypto(src, slen, dst, dlen, | ||
94 | ctx->lz4hc_comp_mem); | ||
95 | } | ||
96 | |||
97 | static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, | ||
98 | u8 *dst, unsigned int *dlen, void *ctx) | ||
66 | { | 99 | { |
67 | int err; | 100 | int err; |
68 | size_t tmp_len = *dlen; | 101 | size_t tmp_len = *dlen; |
@@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
76 | return err; | 109 | return err; |
77 | } | 110 | } |
78 | 111 | ||
112 | static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
113 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
114 | void *ctx) | ||
115 | { | ||
116 | return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); | ||
117 | } | ||
118 | |||
119 | static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
120 | unsigned int slen, u8 *dst, | ||
121 | unsigned int *dlen) | ||
122 | { | ||
123 | return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); | ||
124 | } | ||
125 | |||
79 | static struct crypto_alg alg_lz4hc = { | 126 | static struct crypto_alg alg_lz4hc = { |
80 | .cra_name = "lz4hc", | 127 | .cra_name = "lz4hc", |
81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 128 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
@@ -89,14 +136,39 @@ static struct crypto_alg alg_lz4hc = { | |||
89 | .coa_decompress = lz4hc_decompress_crypto } } | 136 | .coa_decompress = lz4hc_decompress_crypto } } |
90 | }; | 137 | }; |
91 | 138 | ||
139 | static struct scomp_alg scomp = { | ||
140 | .alloc_ctx = lz4hc_alloc_ctx, | ||
141 | .free_ctx = lz4hc_free_ctx, | ||
142 | .compress = lz4hc_scompress, | ||
143 | .decompress = lz4hc_sdecompress, | ||
144 | .base = { | ||
145 | .cra_name = "lz4hc", | ||
146 | .cra_driver_name = "lz4hc-scomp", | ||
147 | .cra_module = THIS_MODULE, | ||
148 | } | ||
149 | }; | ||
150 | |||
92 | static int __init lz4hc_mod_init(void) | 151 | static int __init lz4hc_mod_init(void) |
93 | { | 152 | { |
94 | return crypto_register_alg(&alg_lz4hc); | 153 | int ret; |
154 | |||
155 | ret = crypto_register_alg(&alg_lz4hc); | ||
156 | if (ret) | ||
157 | return ret; | ||
158 | |||
159 | ret = crypto_register_scomp(&scomp); | ||
160 | if (ret) { | ||
161 | crypto_unregister_alg(&alg_lz4hc); | ||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | return ret; | ||
95 | } | 166 | } |
96 | 167 | ||
97 | static void __exit lz4hc_mod_fini(void) | 168 | static void __exit lz4hc_mod_fini(void) |
98 | { | 169 | { |
99 | crypto_unregister_alg(&alg_lz4hc); | 170 | crypto_unregister_alg(&alg_lz4hc); |
171 | crypto_unregister_scomp(&scomp); | ||
100 | } | 172 | } |
101 | 173 | ||
102 | module_init(lz4hc_mod_init); | 174 | module_init(lz4hc_mod_init); |
diff --git a/crypto/lzo.c b/crypto/lzo.c index c3f3dd9a28c5..168df784da84 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c | |||
@@ -22,40 +22,55 @@ | |||
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/lzo.h> | 24 | #include <linux/lzo.h> |
25 | #include <crypto/internal/scompress.h> | ||
25 | 26 | ||
26 | struct lzo_ctx { | 27 | struct lzo_ctx { |
27 | void *lzo_comp_mem; | 28 | void *lzo_comp_mem; |
28 | }; | 29 | }; |
29 | 30 | ||
31 | static void *lzo_alloc_ctx(struct crypto_scomp *tfm) | ||
32 | { | ||
33 | void *ctx; | ||
34 | |||
35 | ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); | ||
36 | if (!ctx) | ||
37 | ctx = vmalloc(LZO1X_MEM_COMPRESS); | ||
38 | if (!ctx) | ||
39 | return ERR_PTR(-ENOMEM); | ||
40 | |||
41 | return ctx; | ||
42 | } | ||
43 | |||
30 | static int lzo_init(struct crypto_tfm *tfm) | 44 | static int lzo_init(struct crypto_tfm *tfm) |
31 | { | 45 | { |
32 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
33 | 47 | ||
34 | ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, | 48 | ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); |
35 | GFP_KERNEL | __GFP_NOWARN); | 49 | if (IS_ERR(ctx->lzo_comp_mem)) |
36 | if (!ctx->lzo_comp_mem) | ||
37 | ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); | ||
38 | if (!ctx->lzo_comp_mem) | ||
39 | return -ENOMEM; | 50 | return -ENOMEM; |
40 | 51 | ||
41 | return 0; | 52 | return 0; |
42 | } | 53 | } |
43 | 54 | ||
55 | static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
56 | { | ||
57 | kvfree(ctx); | ||
58 | } | ||
59 | |||
44 | static void lzo_exit(struct crypto_tfm *tfm) | 60 | static void lzo_exit(struct crypto_tfm *tfm) |
45 | { | 61 | { |
46 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 62 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
47 | 63 | ||
48 | kvfree(ctx->lzo_comp_mem); | 64 | lzo_free_ctx(NULL, ctx->lzo_comp_mem); |
49 | } | 65 | } |
50 | 66 | ||
51 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, | 67 | static int __lzo_compress(const u8 *src, unsigned int slen, |
52 | unsigned int slen, u8 *dst, unsigned int *dlen) | 68 | u8 *dst, unsigned int *dlen, void *ctx) |
53 | { | 69 | { |
54 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | ||
55 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ | 70 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ |
56 | int err; | 71 | int err; |
57 | 72 | ||
58 | err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); | 73 | err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); |
59 | 74 | ||
60 | if (err != LZO_E_OK) | 75 | if (err != LZO_E_OK) |
61 | return -EINVAL; | 76 | return -EINVAL; |
@@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, | |||
64 | return 0; | 79 | return 0; |
65 | } | 80 | } |
66 | 81 | ||
67 | static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | 82 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, |
68 | unsigned int slen, u8 *dst, unsigned int *dlen) | 83 | unsigned int slen, u8 *dst, unsigned int *dlen) |
84 | { | ||
85 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | ||
86 | |||
87 | return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); | ||
88 | } | ||
89 | |||
90 | static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, | ||
91 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
92 | void *ctx) | ||
93 | { | ||
94 | return __lzo_compress(src, slen, dst, dlen, ctx); | ||
95 | } | ||
96 | |||
97 | static int __lzo_decompress(const u8 *src, unsigned int slen, | ||
98 | u8 *dst, unsigned int *dlen) | ||
69 | { | 99 | { |
70 | int err; | 100 | int err; |
71 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ | 101 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ |
@@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | |||
77 | 107 | ||
78 | *dlen = tmp_len; | 108 | *dlen = tmp_len; |
79 | return 0; | 109 | return 0; |
110 | } | ||
80 | 111 | ||
112 | static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | ||
113 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
114 | { | ||
115 | return __lzo_decompress(src, slen, dst, dlen); | ||
116 | } | ||
117 | |||
118 | static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
119 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
120 | void *ctx) | ||
121 | { | ||
122 | return __lzo_decompress(src, slen, dst, dlen); | ||
81 | } | 123 | } |
82 | 124 | ||
83 | static struct crypto_alg alg = { | 125 | static struct crypto_alg alg = { |
@@ -88,18 +130,43 @@ static struct crypto_alg alg = { | |||
88 | .cra_init = lzo_init, | 130 | .cra_init = lzo_init, |
89 | .cra_exit = lzo_exit, | 131 | .cra_exit = lzo_exit, |
90 | .cra_u = { .compress = { | 132 | .cra_u = { .compress = { |
91 | .coa_compress = lzo_compress, | 133 | .coa_compress = lzo_compress, |
92 | .coa_decompress = lzo_decompress } } | 134 | .coa_decompress = lzo_decompress } } |
135 | }; | ||
136 | |||
137 | static struct scomp_alg scomp = { | ||
138 | .alloc_ctx = lzo_alloc_ctx, | ||
139 | .free_ctx = lzo_free_ctx, | ||
140 | .compress = lzo_scompress, | ||
141 | .decompress = lzo_sdecompress, | ||
142 | .base = { | ||
143 | .cra_name = "lzo", | ||
144 | .cra_driver_name = "lzo-scomp", | ||
145 | .cra_module = THIS_MODULE, | ||
146 | } | ||
93 | }; | 147 | }; |
94 | 148 | ||
95 | static int __init lzo_mod_init(void) | 149 | static int __init lzo_mod_init(void) |
96 | { | 150 | { |
97 | return crypto_register_alg(&alg); | 151 | int ret; |
152 | |||
153 | ret = crypto_register_alg(&alg); | ||
154 | if (ret) | ||
155 | return ret; | ||
156 | |||
157 | ret = crypto_register_scomp(&scomp); | ||
158 | if (ret) { | ||
159 | crypto_unregister_alg(&alg); | ||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | return ret; | ||
98 | } | 164 | } |
99 | 165 | ||
100 | static void __exit lzo_mod_fini(void) | 166 | static void __exit lzo_mod_fini(void) |
101 | { | 167 | { |
102 | crypto_unregister_alg(&alg); | 168 | crypto_unregister_alg(&alg); |
169 | crypto_unregister_scomp(&scomp); | ||
103 | } | 170 | } |
104 | 171 | ||
105 | module_init(lzo_mod_init); | 172 | module_init(lzo_mod_init); |
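lzo_alloc_ctx() keeps the existing kmalloc-with-vmalloc-fallback idiom for its working memory, and lzo_free_ctx() relies on kvfree() accepting either kind of pointer. A generic sketch of that pattern (names and size are illustrative, not part of this patch):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_workmem(size_t size)
{
	/* Prefer physically contiguous memory, but fall back to vmalloc
	 * quietly (__GFP_NOWARN) when memory is fragmented or the request
	 * is large.
	 */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;
}

static void free_workmem(void *p)
{
	kvfree(p);	/* handles both kmalloc()ed and vmalloc()ed memory */
}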
diff --git a/crypto/pcbc.c b/crypto/pcbc.c index f654965f0933..e4538e07f7ca 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c | |||
@@ -14,40 +14,37 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <crypto/algapi.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/scatterlist.h> | ||
23 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
24 | 23 | ||
25 | struct crypto_pcbc_ctx { | 24 | struct crypto_pcbc_ctx { |
26 | struct crypto_cipher *child; | 25 | struct crypto_cipher *child; |
27 | }; | 26 | }; |
28 | 27 | ||
29 | static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, | 28 | static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key, |
30 | unsigned int keylen) | 29 | unsigned int keylen) |
31 | { | 30 | { |
32 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); | 31 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent); |
33 | struct crypto_cipher *child = ctx->child; | 32 | struct crypto_cipher *child = ctx->child; |
34 | int err; | 33 | int err; |
35 | 34 | ||
36 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 35 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
37 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 36 | crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
38 | CRYPTO_TFM_REQ_MASK); | 37 | CRYPTO_TFM_REQ_MASK); |
39 | err = crypto_cipher_setkey(child, key, keylen); | 38 | err = crypto_cipher_setkey(child, key, keylen); |
40 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 39 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & |
41 | CRYPTO_TFM_RES_MASK); | 40 | CRYPTO_TFM_RES_MASK); |
42 | return err; | 41 | return err; |
43 | } | 42 | } |
44 | 43 | ||
45 | static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | 44 | static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, |
46 | struct blkcipher_walk *walk, | 45 | struct skcipher_walk *walk, |
47 | struct crypto_cipher *tfm) | 46 | struct crypto_cipher *tfm) |
48 | { | 47 | { |
49 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
50 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
51 | int bsize = crypto_cipher_blocksize(tfm); | 48 | int bsize = crypto_cipher_blocksize(tfm); |
52 | unsigned int nbytes = walk->nbytes; | 49 | unsigned int nbytes = walk->nbytes; |
53 | u8 *src = walk->src.virt.addr; | 50 | u8 *src = walk->src.virt.addr; |
@@ -56,7 +53,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | |||
56 | 53 | ||
57 | do { | 54 | do { |
58 | crypto_xor(iv, src, bsize); | 55 | crypto_xor(iv, src, bsize); |
59 | fn(crypto_cipher_tfm(tfm), dst, iv); | 56 | crypto_cipher_encrypt_one(tfm, dst, iv); |
60 | memcpy(iv, dst, bsize); | 57 | memcpy(iv, dst, bsize); |
61 | crypto_xor(iv, src, bsize); | 58 | crypto_xor(iv, src, bsize); |
62 | 59 | ||
@@ -67,12 +64,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | |||
67 | return nbytes; | 64 | return nbytes; |
68 | } | 65 | } |
69 | 66 | ||
70 | static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | 67 | static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, |
71 | struct blkcipher_walk *walk, | 68 | struct skcipher_walk *walk, |
72 | struct crypto_cipher *tfm) | 69 | struct crypto_cipher *tfm) |
73 | { | 70 | { |
74 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
75 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
76 | int bsize = crypto_cipher_blocksize(tfm); | 71 | int bsize = crypto_cipher_blocksize(tfm); |
77 | unsigned int nbytes = walk->nbytes; | 72 | unsigned int nbytes = walk->nbytes; |
78 | u8 *src = walk->src.virt.addr; | 73 | u8 *src = walk->src.virt.addr; |
@@ -82,7 +77,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | |||
82 | do { | 77 | do { |
83 | memcpy(tmpbuf, src, bsize); | 78 | memcpy(tmpbuf, src, bsize); |
84 | crypto_xor(iv, src, bsize); | 79 | crypto_xor(iv, src, bsize); |
85 | fn(crypto_cipher_tfm(tfm), src, iv); | 80 | crypto_cipher_encrypt_one(tfm, src, iv); |
86 | memcpy(iv, tmpbuf, bsize); | 81 | memcpy(iv, tmpbuf, bsize); |
87 | crypto_xor(iv, src, bsize); | 82 | crypto_xor(iv, src, bsize); |
88 | 83 | ||
@@ -94,38 +89,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | |||
94 | return nbytes; | 89 | return nbytes; |
95 | } | 90 | } |
96 | 91 | ||
97 | static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, | 92 | static int crypto_pcbc_encrypt(struct skcipher_request *req) |
98 | struct scatterlist *dst, struct scatterlist *src, | ||
99 | unsigned int nbytes) | ||
100 | { | 93 | { |
101 | struct blkcipher_walk walk; | 94 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
102 | struct crypto_blkcipher *tfm = desc->tfm; | 95 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
103 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
104 | struct crypto_cipher *child = ctx->child; | 96 | struct crypto_cipher *child = ctx->child; |
97 | struct skcipher_walk walk; | ||
98 | unsigned int nbytes; | ||
105 | int err; | 99 | int err; |
106 | 100 | ||
107 | blkcipher_walk_init(&walk, dst, src, nbytes); | 101 | err = skcipher_walk_virt(&walk, req, false); |
108 | err = blkcipher_walk_virt(desc, &walk); | ||
109 | 102 | ||
110 | while ((nbytes = walk.nbytes)) { | 103 | while ((nbytes = walk.nbytes)) { |
111 | if (walk.src.virt.addr == walk.dst.virt.addr) | 104 | if (walk.src.virt.addr == walk.dst.virt.addr) |
112 | nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, | 105 | nbytes = crypto_pcbc_encrypt_inplace(req, &walk, |
113 | child); | 106 | child); |
114 | else | 107 | else |
115 | nbytes = crypto_pcbc_encrypt_segment(desc, &walk, | 108 | nbytes = crypto_pcbc_encrypt_segment(req, &walk, |
116 | child); | 109 | child); |
117 | err = blkcipher_walk_done(desc, &walk, nbytes); | 110 | err = skcipher_walk_done(&walk, nbytes); |
118 | } | 111 | } |
119 | 112 | ||
120 | return err; | 113 | return err; |
121 | } | 114 | } |
122 | 115 | ||
123 | static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | 116 | static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, |
124 | struct blkcipher_walk *walk, | 117 | struct skcipher_walk *walk, |
125 | struct crypto_cipher *tfm) | 118 | struct crypto_cipher *tfm) |
126 | { | 119 | { |
127 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
128 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
129 | int bsize = crypto_cipher_blocksize(tfm); | 120 | int bsize = crypto_cipher_blocksize(tfm); |
130 | unsigned int nbytes = walk->nbytes; | 121 | unsigned int nbytes = walk->nbytes; |
131 | u8 *src = walk->src.virt.addr; | 122 | u8 *src = walk->src.virt.addr; |
@@ -133,7 +124,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | |||
133 | u8 *iv = walk->iv; | 124 | u8 *iv = walk->iv; |
134 | 125 | ||
135 | do { | 126 | do { |
136 | fn(crypto_cipher_tfm(tfm), dst, src); | 127 | crypto_cipher_decrypt_one(tfm, dst, src); |
137 | crypto_xor(dst, iv, bsize); | 128 | crypto_xor(dst, iv, bsize); |
138 | memcpy(iv, src, bsize); | 129 | memcpy(iv, src, bsize); |
139 | crypto_xor(iv, dst, bsize); | 130 | crypto_xor(iv, dst, bsize); |
@@ -147,21 +138,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | |||
147 | return nbytes; | 138 | return nbytes; |
148 | } | 139 | } |
149 | 140 | ||
150 | static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, | 141 | static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, |
151 | struct blkcipher_walk *walk, | 142 | struct skcipher_walk *walk, |
152 | struct crypto_cipher *tfm) | 143 | struct crypto_cipher *tfm) |
153 | { | 144 | { |
154 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
155 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
156 | int bsize = crypto_cipher_blocksize(tfm); | 145 | int bsize = crypto_cipher_blocksize(tfm); |
157 | unsigned int nbytes = walk->nbytes; | 146 | unsigned int nbytes = walk->nbytes; |
158 | u8 *src = walk->src.virt.addr; | 147 | u8 *src = walk->src.virt.addr; |
159 | u8 *iv = walk->iv; | 148 | u8 *iv = walk->iv; |
160 | u8 tmpbuf[bsize]; | 149 | u8 tmpbuf[bsize] __attribute__ ((aligned(__alignof__(u32)))); |
161 | 150 | ||
162 | do { | 151 | do { |
163 | memcpy(tmpbuf, src, bsize); | 152 | memcpy(tmpbuf, src, bsize); |
164 | fn(crypto_cipher_tfm(tfm), src, src); | 153 | crypto_cipher_decrypt_one(tfm, src, src); |
165 | crypto_xor(src, iv, bsize); | 154 | crypto_xor(src, iv, bsize); |
166 | memcpy(iv, tmpbuf, bsize); | 155 | memcpy(iv, tmpbuf, bsize); |
167 | crypto_xor(iv, src, bsize); | 156 | crypto_xor(iv, src, bsize); |
@@ -174,37 +163,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, | |||
174 | return nbytes; | 163 | return nbytes; |
175 | } | 164 | } |
176 | 165 | ||
177 | static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, | 166 | static int crypto_pcbc_decrypt(struct skcipher_request *req) |
178 | struct scatterlist *dst, struct scatterlist *src, | ||
179 | unsigned int nbytes) | ||
180 | { | 167 | { |
181 | struct blkcipher_walk walk; | 168 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
182 | struct crypto_blkcipher *tfm = desc->tfm; | 169 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
183 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
184 | struct crypto_cipher *child = ctx->child; | 170 | struct crypto_cipher *child = ctx->child; |
171 | struct skcipher_walk walk; | ||
172 | unsigned int nbytes; | ||
185 | int err; | 173 | int err; |
186 | 174 | ||
187 | blkcipher_walk_init(&walk, dst, src, nbytes); | 175 | err = skcipher_walk_virt(&walk, req, false); |
188 | err = blkcipher_walk_virt(desc, &walk); | ||
189 | 176 | ||
190 | while ((nbytes = walk.nbytes)) { | 177 | while ((nbytes = walk.nbytes)) { |
191 | if (walk.src.virt.addr == walk.dst.virt.addr) | 178 | if (walk.src.virt.addr == walk.dst.virt.addr) |
192 | nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, | 179 | nbytes = crypto_pcbc_decrypt_inplace(req, &walk, |
193 | child); | 180 | child); |
194 | else | 181 | else |
195 | nbytes = crypto_pcbc_decrypt_segment(desc, &walk, | 182 | nbytes = crypto_pcbc_decrypt_segment(req, &walk, |
196 | child); | 183 | child); |
197 | err = blkcipher_walk_done(desc, &walk, nbytes); | 184 | err = skcipher_walk_done(&walk, nbytes); |
198 | } | 185 | } |
199 | 186 | ||
200 | return err; | 187 | return err; |
201 | } | 188 | } |
202 | 189 | ||
203 | static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) | 190 | static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm) |
204 | { | 191 | { |
205 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 192 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
206 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 193 | struct crypto_spawn *spawn = skcipher_instance_ctx(inst); |
207 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 194 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
208 | struct crypto_cipher *cipher; | 195 | struct crypto_cipher *cipher; |
209 | 196 | ||
210 | cipher = crypto_spawn_cipher(spawn); | 197 | cipher = crypto_spawn_cipher(spawn); |
@@ -215,68 +202,98 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) | |||
215 | return 0; | 202 | return 0; |
216 | } | 203 | } |
217 | 204 | ||
218 | static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) | 205 | static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm) |
219 | { | 206 | { |
220 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 207 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
208 | |||
221 | crypto_free_cipher(ctx->child); | 209 | crypto_free_cipher(ctx->child); |
222 | } | 210 | } |
223 | 211 | ||
224 | static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) | 212 | static void crypto_pcbc_free(struct skcipher_instance *inst) |
213 | { | ||
214 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
215 | kfree(inst); | ||
216 | } | ||
217 | |||
218 | static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
225 | { | 219 | { |
226 | struct crypto_instance *inst; | 220 | struct skcipher_instance *inst; |
221 | struct crypto_attr_type *algt; | ||
222 | struct crypto_spawn *spawn; | ||
227 | struct crypto_alg *alg; | 223 | struct crypto_alg *alg; |
228 | int err; | 224 | int err; |
229 | 225 | ||
230 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 226 | algt = crypto_get_attr_type(tb); |
231 | if (err) | 227 | if (IS_ERR(algt)) |
232 | return ERR_PTR(err); | 228 | return PTR_ERR(algt); |
229 | |||
230 | if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) & | ||
231 | ~CRYPTO_ALG_INTERNAL) | ||
232 | return -EINVAL; | ||
233 | 233 | ||
234 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 234 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); |
235 | CRYPTO_ALG_TYPE_MASK); | 235 | if (!inst) |
236 | return -ENOMEM; | ||
237 | |||
238 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER | | ||
239 | (algt->type & CRYPTO_ALG_INTERNAL), | ||
240 | CRYPTO_ALG_TYPE_MASK | | ||
241 | (algt->mask & CRYPTO_ALG_INTERNAL)); | ||
242 | err = PTR_ERR(alg); | ||
236 | if (IS_ERR(alg)) | 243 | if (IS_ERR(alg)) |
237 | return ERR_CAST(alg); | 244 | goto err_free_inst; |
245 | |||
246 | spawn = skcipher_instance_ctx(inst); | ||
247 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | ||
248 | CRYPTO_ALG_TYPE_MASK); | ||
249 | crypto_mod_put(alg); | ||
250 | if (err) | ||
251 | goto err_free_inst; | ||
238 | 252 | ||
239 | inst = crypto_alloc_instance("pcbc", alg); | 253 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); |
240 | if (IS_ERR(inst)) | 254 | if (err) |
241 | goto out_put_alg; | 255 | goto err_drop_spawn; |
242 | 256 | ||
243 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 257 | inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL; |
244 | inst->alg.cra_priority = alg->cra_priority; | 258 | inst->alg.base.cra_priority = alg->cra_priority; |
245 | inst->alg.cra_blocksize = alg->cra_blocksize; | 259 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
246 | inst->alg.cra_alignmask = alg->cra_alignmask; | 260 | inst->alg.base.cra_alignmask = alg->cra_alignmask; |
247 | inst->alg.cra_type = &crypto_blkcipher_type; | ||
248 | 261 | ||
249 | /* We access the data as u32s when xoring. */ | 262 | /* We access the data as u32s when xoring. */ |
250 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 263 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; |
251 | 264 | ||
252 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 265 | inst->alg.ivsize = alg->cra_blocksize; |
253 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 266 | inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; |
254 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 267 | inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; |
255 | 268 | ||
256 | inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); | 269 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); |
257 | 270 | ||
258 | inst->alg.cra_init = crypto_pcbc_init_tfm; | 271 | inst->alg.init = crypto_pcbc_init_tfm; |
259 | inst->alg.cra_exit = crypto_pcbc_exit_tfm; | 272 | inst->alg.exit = crypto_pcbc_exit_tfm; |
260 | 273 | ||
261 | inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; | 274 | inst->alg.setkey = crypto_pcbc_setkey; |
262 | inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; | 275 | inst->alg.encrypt = crypto_pcbc_encrypt; |
263 | inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; | 276 | inst->alg.decrypt = crypto_pcbc_decrypt; |
264 | 277 | ||
265 | out_put_alg: | 278 | inst->free = crypto_pcbc_free; |
266 | crypto_mod_put(alg); | ||
267 | return inst; | ||
268 | } | ||
269 | 279 | ||
270 | static void crypto_pcbc_free(struct crypto_instance *inst) | 280 | err = skcipher_register_instance(tmpl, inst); |
271 | { | 281 | if (err) |
272 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 282 | goto err_drop_spawn; |
283 | |||
284 | out: | ||
285 | return err; | ||
286 | |||
287 | err_drop_spawn: | ||
288 | crypto_drop_spawn(spawn); | ||
289 | err_free_inst: | ||
273 | kfree(inst); | 290 | kfree(inst); |
291 | goto out; | ||
274 | } | 292 | } |
275 | 293 | ||
276 | static struct crypto_template crypto_pcbc_tmpl = { | 294 | static struct crypto_template crypto_pcbc_tmpl = { |
277 | .name = "pcbc", | 295 | .name = "pcbc", |
278 | .alloc = crypto_pcbc_alloc, | 296 | .create = crypto_pcbc_create, |
279 | .free = crypto_pcbc_free, | ||
280 | .module = THIS_MODULE, | 297 | .module = THIS_MODULE, |
281 | }; | 298 | }; |
282 | 299 | ||
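For reference, the per-block recurrence that the segment/in-place helpers above implement is C[i] = E_K(P[i] ^ P[i-1] ^ C[i-1]), with the IV standing in for P[0] ^ C[0]. A freestanding sketch of one encryption step (illustrative only; encrypt_block() is a stand-in for crypto_cipher_encrypt_one()):

#include <linux/types.h>

/* One PCBC encryption step. 'chain' carries P[i-1] ^ C[i-1] between calls
 * and is initialised to the IV before the first block.
 */
static void pcbc_encrypt_block(const u8 *pt, u8 *ct, u8 *chain,
			       unsigned int bsize,
			       void (*encrypt_block)(u8 *dst, const u8 *src))
{
	unsigned int i;

	for (i = 0; i < bsize; i++)
		chain[i] ^= pt[i];		/* chain = P[i] ^ P[i-1] ^ C[i-1] */

	encrypt_block(ct, chain);		/* C[i] = E_K(chain) */

	for (i = 0; i < bsize; i++)
		chain[i] = ct[i] ^ pt[i];	/* next chain = P[i] ^ C[i] */
}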
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 2df9835dfbc0..b1c2d57dc734 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <asm/unaligned.h> | ||
20 | 21 | ||
21 | static inline u64 mlt(u64 a, u64 b) | 22 | static inline u64 mlt(u64 a, u64 b) |
22 | { | 23 | { |
@@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask) | |||
33 | return v & mask; | 34 | return v & mask; |
34 | } | 35 | } |
35 | 36 | ||
36 | static inline u32 le32_to_cpuvp(const void *p) | ||
37 | { | ||
38 | return le32_to_cpup(p); | ||
39 | } | ||
40 | |||
41 | int crypto_poly1305_init(struct shash_desc *desc) | 37 | int crypto_poly1305_init(struct shash_desc *desc) |
42 | { | 38 | { |
43 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 39 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
@@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); | |||
65 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | 61 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
66 | { | 62 | { |
67 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ | 63 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
68 | dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff; | 64 | dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff; |
69 | dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03; | 65 | dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03; |
70 | dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff; | 66 | dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff; |
71 | dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff; | 67 | dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff; |
72 | dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff; | 68 | dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff; |
73 | } | 69 | } |
74 | 70 | ||
75 | static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | 71 | static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) |
76 | { | 72 | { |
77 | dctx->s[0] = le32_to_cpuvp(key + 0); | 73 | dctx->s[0] = get_unaligned_le32(key + 0); |
78 | dctx->s[1] = le32_to_cpuvp(key + 4); | 74 | dctx->s[1] = get_unaligned_le32(key + 4); |
79 | dctx->s[2] = le32_to_cpuvp(key + 8); | 75 | dctx->s[2] = get_unaligned_le32(key + 8); |
80 | dctx->s[3] = le32_to_cpuvp(key + 12); | 76 | dctx->s[3] = get_unaligned_le32(key + 12); |
81 | } | 77 | } |
82 | 78 | ||
83 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 79 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
@@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | |||
137 | while (likely(srclen >= POLY1305_BLOCK_SIZE)) { | 133 | while (likely(srclen >= POLY1305_BLOCK_SIZE)) { |
138 | 134 | ||
139 | /* h += m[i] */ | 135 | /* h += m[i] */ |
140 | h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff; | 136 | h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; |
141 | h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff; | 137 | h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; |
142 | h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff; | 138 | h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; |
143 | h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff; | 139 | h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; |
144 | h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit; | 140 | h4 += (get_unaligned_le32(src + 12) >> 8) | hibit; |
145 | 141 | ||
146 | /* h *= r */ | 142 | /* h *= r */ |
147 | d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + | 143 | d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + |
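The poly1305 change replaces le32_to_cpup() dereferences, which assume 4-byte alignment, with get_unaligned_le32(), which is safe for any source pointer. A tiny sketch of the accessor (illustrative only):

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_le32_any_alignment(const u8 *p)
{
	/* Safe for arbitrary alignment: compiles to a plain load on CPUs
	 * that allow unaligned accesses and to byte loads elsewhere.
	 */
	return get_unaligned_le32(p);
}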
diff --git a/crypto/scompress.c b/crypto/scompress.c new file mode 100644 index 000000000000..35e396d154b7 --- /dev/null +++ b/crypto/scompress.c | |||
@@ -0,0 +1,356 @@ | |||
1 | /* | ||
2 | * Synchronous Compression operations | ||
3 | * | ||
4 | * Copyright 2015 LG Electronics Inc. | ||
5 | * Copyright (c) 2016, Intel Corporation | ||
6 | * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/crypto.h> | ||
21 | #include <linux/vmalloc.h> | ||
22 | #include <crypto/algapi.h> | ||
23 | #include <linux/cryptouser.h> | ||
24 | #include <net/netlink.h> | ||
25 | #include <linux/scatterlist.h> | ||
26 | #include <crypto/scatterwalk.h> | ||
27 | #include <crypto/internal/acompress.h> | ||
28 | #include <crypto/internal/scompress.h> | ||
29 | #include "internal.h" | ||
30 | |||
31 | static const struct crypto_type crypto_scomp_type; | ||
32 | static void * __percpu *scomp_src_scratches; | ||
33 | static void * __percpu *scomp_dst_scratches; | ||
34 | static int scomp_scratch_users; | ||
35 | static DEFINE_MUTEX(scomp_lock); | ||
36 | |||
37 | #ifdef CONFIG_NET | ||
38 | static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
39 | { | ||
40 | struct crypto_report_comp rscomp; | ||
41 | |||
42 | strncpy(rscomp.type, "scomp", sizeof(rscomp.type)); | ||
43 | |||
44 | if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, | ||
45 | sizeof(struct crypto_report_comp), &rscomp)) | ||
46 | goto nla_put_failure; | ||
47 | return 0; | ||
48 | |||
49 | nla_put_failure: | ||
50 | return -EMSGSIZE; | ||
51 | } | ||
52 | #else | ||
53 | static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
54 | { | ||
55 | return -ENOSYS; | ||
56 | } | ||
57 | #endif | ||
58 | |||
59 | static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
60 | __attribute__ ((unused)); | ||
61 | |||
62 | static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
63 | { | ||
64 | seq_puts(m, "type : scomp\n"); | ||
65 | } | ||
66 | |||
67 | static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void crypto_scomp_free_scratches(void * __percpu *scratches) | ||
73 | { | ||
74 | int i; | ||
75 | |||
76 | if (!scratches) | ||
77 | return; | ||
78 | |||
79 | for_each_possible_cpu(i) | ||
80 | vfree(*per_cpu_ptr(scratches, i)); | ||
81 | |||
82 | free_percpu(scratches); | ||
83 | } | ||
84 | |||
85 | static void * __percpu *crypto_scomp_alloc_scratches(void) | ||
86 | { | ||
87 | void * __percpu *scratches; | ||
88 | int i; | ||
89 | |||
90 | scratches = alloc_percpu(void *); | ||
91 | if (!scratches) | ||
92 | return NULL; | ||
93 | |||
94 | for_each_possible_cpu(i) { | ||
95 | void *scratch; | ||
96 | |||
97 | scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); | ||
98 | if (!scratch) | ||
99 | goto error; | ||
100 | *per_cpu_ptr(scratches, i) = scratch; | ||
101 | } | ||
102 | |||
103 | return scratches; | ||
104 | |||
105 | error: | ||
106 | crypto_scomp_free_scratches(scratches); | ||
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | static void crypto_scomp_free_all_scratches(void) | ||
111 | { | ||
112 | if (!--scomp_scratch_users) { | ||
113 | crypto_scomp_free_scratches(scomp_src_scratches); | ||
114 | crypto_scomp_free_scratches(scomp_dst_scratches); | ||
115 | scomp_src_scratches = NULL; | ||
116 | scomp_dst_scratches = NULL; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static int crypto_scomp_alloc_all_scratches(void) | ||
121 | { | ||
122 | if (!scomp_scratch_users++) { | ||
123 | scomp_src_scratches = crypto_scomp_alloc_scratches(); | ||
124 | if (!scomp_src_scratches) | ||
125 | return -ENOMEM; | ||
126 | scomp_dst_scratches = crypto_scomp_alloc_scratches(); | ||
127 | if (!scomp_dst_scratches) | ||
128 | return -ENOMEM; | ||
129 | } | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static void crypto_scomp_sg_free(struct scatterlist *sgl) | ||
134 | { | ||
135 | int i, n; | ||
136 | struct page *page; | ||
137 | |||
138 | if (!sgl) | ||
139 | return; | ||
140 | |||
141 | n = sg_nents(sgl); | ||
142 | for_each_sg(sgl, sgl, n, i) { | ||
143 | page = sg_page(sgl); | ||
144 | if (page) | ||
145 | __free_page(page); | ||
146 | } | ||
147 | |||
148 | kfree(sgl); | ||
149 | } | ||
150 | |||
151 | static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp) | ||
152 | { | ||
153 | struct scatterlist *sgl; | ||
154 | struct page *page; | ||
155 | int i, n; | ||
156 | |||
157 | n = ((size - 1) >> PAGE_SHIFT) + 1; | ||
158 | |||
159 | sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp); | ||
160 | if (!sgl) | ||
161 | return NULL; | ||
162 | |||
163 | sg_init_table(sgl, n); | ||
164 | |||
165 | for (i = 0; i < n; i++) { | ||
166 | page = alloc_page(gfp); | ||
167 | if (!page) | ||
168 | goto err; | ||
169 | sg_set_page(sgl + i, page, PAGE_SIZE, 0); | ||
170 | } | ||
171 | |||
172 | return sgl; | ||
173 | |||
174 | err: | ||
175 | sg_mark_end(sgl + i); | ||
176 | crypto_scomp_sg_free(sgl); | ||
177 | return NULL; | ||
178 | } | ||
179 | |||
180 | static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) | ||
181 | { | ||
182 | struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); | ||
183 | void **tfm_ctx = acomp_tfm_ctx(tfm); | ||
184 | struct crypto_scomp *scomp = *tfm_ctx; | ||
185 | void **ctx = acomp_request_ctx(req); | ||
186 | const int cpu = get_cpu(); | ||
187 | u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); | ||
188 | u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); | ||
189 | int ret; | ||
190 | |||
191 | if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { | ||
192 | ret = -EINVAL; | ||
193 | goto out; | ||
194 | } | ||
195 | |||
196 | if (req->dst && !req->dlen) { | ||
197 | ret = -EINVAL; | ||
198 | goto out; | ||
199 | } | ||
200 | |||
201 | if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) | ||
202 | req->dlen = SCOMP_SCRATCH_SIZE; | ||
203 | |||
204 | scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); | ||
205 | if (dir) | ||
206 | ret = crypto_scomp_compress(scomp, scratch_src, req->slen, | ||
207 | scratch_dst, &req->dlen, *ctx); | ||
208 | else | ||
209 | ret = crypto_scomp_decompress(scomp, scratch_src, req->slen, | ||
210 | scratch_dst, &req->dlen, *ctx); | ||
211 | if (!ret) { | ||
212 | if (!req->dst) { | ||
213 | req->dst = crypto_scomp_sg_alloc(req->dlen, | ||
214 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
215 | GFP_KERNEL : GFP_ATOMIC); | ||
216 | if (!req->dst) | ||
217 | goto out; | ||
218 | } | ||
219 | scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, | ||
220 | 1); | ||
221 | } | ||
222 | out: | ||
223 | put_cpu(); | ||
224 | return ret; | ||
225 | } | ||
226 | |||
227 | static int scomp_acomp_compress(struct acomp_req *req) | ||
228 | { | ||
229 | return scomp_acomp_comp_decomp(req, 1); | ||
230 | } | ||
231 | |||
232 | static int scomp_acomp_decompress(struct acomp_req *req) | ||
233 | { | ||
234 | return scomp_acomp_comp_decomp(req, 0); | ||
235 | } | ||
236 | |||
237 | static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) | ||
238 | { | ||
239 | struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); | ||
240 | |||
241 | crypto_free_scomp(*ctx); | ||
242 | } | ||
243 | |||
244 | int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) | ||
245 | { | ||
246 | struct crypto_alg *calg = tfm->__crt_alg; | ||
247 | struct crypto_acomp *crt = __crypto_acomp_tfm(tfm); | ||
248 | struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); | ||
249 | struct crypto_scomp *scomp; | ||
250 | |||
251 | if (!crypto_mod_get(calg)) | ||
252 | return -EAGAIN; | ||
253 | |||
254 | scomp = crypto_create_tfm(calg, &crypto_scomp_type); | ||
255 | if (IS_ERR(scomp)) { | ||
256 | crypto_mod_put(calg); | ||
257 | return PTR_ERR(scomp); | ||
258 | } | ||
259 | |||
260 | *ctx = scomp; | ||
261 | tfm->exit = crypto_exit_scomp_ops_async; | ||
262 | |||
263 | crt->compress = scomp_acomp_compress; | ||
264 | crt->decompress = scomp_acomp_decompress; | ||
265 | crt->dst_free = crypto_scomp_sg_free; | ||
266 | crt->reqsize = sizeof(void *); | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) | ||
272 | { | ||
273 | struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); | ||
274 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
275 | struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); | ||
276 | struct crypto_scomp *scomp = *tfm_ctx; | ||
277 | void *ctx; | ||
278 | |||
279 | ctx = crypto_scomp_alloc_ctx(scomp); | ||
280 | if (IS_ERR(ctx)) { | ||
281 | kfree(req); | ||
282 | return NULL; | ||
283 | } | ||
284 | |||
285 | *req->__ctx = ctx; | ||
286 | |||
287 | return req; | ||
288 | } | ||
289 | |||
290 | void crypto_acomp_scomp_free_ctx(struct acomp_req *req) | ||
291 | { | ||
292 | struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); | ||
293 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
294 | struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); | ||
295 | struct crypto_scomp *scomp = *tfm_ctx; | ||
296 | void *ctx = *req->__ctx; | ||
297 | |||
298 | if (ctx) | ||
299 | crypto_scomp_free_ctx(scomp, ctx); | ||
300 | } | ||
301 | |||
302 | static const struct crypto_type crypto_scomp_type = { | ||
303 | .extsize = crypto_alg_extsize, | ||
304 | .init_tfm = crypto_scomp_init_tfm, | ||
305 | #ifdef CONFIG_PROC_FS | ||
306 | .show = crypto_scomp_show, | ||
307 | #endif | ||
308 | .report = crypto_scomp_report, | ||
309 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
310 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
311 | .type = CRYPTO_ALG_TYPE_SCOMPRESS, | ||
312 | .tfmsize = offsetof(struct crypto_scomp, base), | ||
313 | }; | ||
314 | |||
315 | int crypto_register_scomp(struct scomp_alg *alg) | ||
316 | { | ||
317 | struct crypto_alg *base = &alg->base; | ||
318 | int ret = -ENOMEM; | ||
319 | |||
320 | mutex_lock(&scomp_lock); | ||
321 | if (crypto_scomp_alloc_all_scratches()) | ||
322 | goto error; | ||
323 | |||
324 | base->cra_type = &crypto_scomp_type; | ||
325 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
326 | base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; | ||
327 | |||
328 | ret = crypto_register_alg(base); | ||
329 | if (ret) | ||
330 | goto error; | ||
331 | |||
332 | mutex_unlock(&scomp_lock); | ||
333 | return ret; | ||
334 | |||
335 | error: | ||
336 | crypto_scomp_free_all_scratches(); | ||
337 | mutex_unlock(&scomp_lock); | ||
338 | return ret; | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(crypto_register_scomp); | ||
341 | |||
342 | int crypto_unregister_scomp(struct scomp_alg *alg) | ||
343 | { | ||
344 | int ret; | ||
345 | |||
346 | mutex_lock(&scomp_lock); | ||
347 | ret = crypto_unregister_alg(&alg->base); | ||
348 | crypto_scomp_free_all_scratches(); | ||
349 | mutex_unlock(&scomp_lock); | ||
350 | |||
351 | return ret; | ||
352 | } | ||
353 | EXPORT_SYMBOL_GPL(crypto_unregister_scomp); | ||
354 | |||
355 | MODULE_LICENSE("GPL"); | ||
356 | MODULE_DESCRIPTION("Synchronous compression type"); | ||
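A provider implements struct scomp_alg in the same way lz4, lz4hc and lzo do above. A hedged skeleton for a hypothetical backend (the pass-through "demo" logic and all names are illustrative and not a real kernel module):

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

static void *demo_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;	/* this backend needs no per-request working memory */
}

static void demo_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int demo_compress(struct crypto_scomp *tfm, const u8 *src,
			 unsigned int slen, u8 *dst, unsigned int *dlen,
			 void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);		/* pass-through "compression" */
	*dlen = slen;
	return 0;
}

static struct scomp_alg demo_scomp = {
	.alloc_ctx	= demo_alloc_ctx,
	.free_ctx	= demo_free_ctx,
	.compress	= demo_compress,
	.decompress	= demo_compress,
	.base		= {
		.cra_name	 = "demo",
		.cra_driver_name = "demo-scomp",
		.cra_module	 = THIS_MODULE,
	},
};

static int __init demo_init(void)
{
	return crypto_register_scomp(&demo_scomp);
}

static void __exit demo_exit(void)
{
	crypto_unregister_scomp(&demo_scomp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");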
diff --git a/crypto/simd.c b/crypto/simd.c new file mode 100644 index 000000000000..88203370a62f --- /dev/null +++ b/crypto/simd.c | |||
@@ -0,0 +1,226 @@ | |||
1 | /* | ||
2 | * Shared crypto simd helpers | ||
3 | * | ||
4 | * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
5 | * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au> | ||
6 | * | ||
7 | * Based on aesni-intel_glue.c by: | ||
8 | * Copyright (C) 2008, Intel Corp. | ||
9 | * Author: Huang Ying <ying.huang@intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
24 | * USA | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include <crypto/cryptd.h> | ||
29 | #include <crypto/internal/simd.h> | ||
30 | #include <crypto/internal/skcipher.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/preempt.h> | ||
34 | #include <asm/simd.h> | ||
35 | |||
36 | struct simd_skcipher_alg { | ||
37 | const char *ialg_name; | ||
38 | struct skcipher_alg alg; | ||
39 | }; | ||
40 | |||
41 | struct simd_skcipher_ctx { | ||
42 | struct cryptd_skcipher *cryptd_tfm; | ||
43 | }; | ||
44 | |||
45 | static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | ||
46 | unsigned int key_len) | ||
47 | { | ||
48 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
49 | struct crypto_skcipher *child = &ctx->cryptd_tfm->base; | ||
50 | int err; | ||
51 | |||
52 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
53 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) & | ||
54 | CRYPTO_TFM_REQ_MASK); | ||
55 | err = crypto_skcipher_setkey(child, key, key_len); | ||
56 | crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) & | ||
57 | CRYPTO_TFM_RES_MASK); | ||
58 | return err; | ||
59 | } | ||
60 | |||
61 | static int simd_skcipher_encrypt(struct skcipher_request *req) | ||
62 | { | ||
63 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
64 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
65 | struct skcipher_request *subreq; | ||
66 | struct crypto_skcipher *child; | ||
67 | |||
68 | subreq = skcipher_request_ctx(req); | ||
69 | *subreq = *req; | ||
70 | |||
71 | if (!may_use_simd() || | ||
72 | (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) | ||
73 | child = &ctx->cryptd_tfm->base; | ||
74 | else | ||
75 | child = cryptd_skcipher_child(ctx->cryptd_tfm); | ||
76 | |||
77 | skcipher_request_set_tfm(subreq, child); | ||
78 | |||
79 | return crypto_skcipher_encrypt(subreq); | ||
80 | } | ||
81 | |||
82 | static int simd_skcipher_decrypt(struct skcipher_request *req) | ||
83 | { | ||
84 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
85 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
86 | struct skcipher_request *subreq; | ||
87 | struct crypto_skcipher *child; | ||
88 | |||
89 | subreq = skcipher_request_ctx(req); | ||
90 | *subreq = *req; | ||
91 | |||
92 | if (!may_use_simd() || | ||
93 | (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) | ||
94 | child = &ctx->cryptd_tfm->base; | ||
95 | else | ||
96 | child = cryptd_skcipher_child(ctx->cryptd_tfm); | ||
97 | |||
98 | skcipher_request_set_tfm(subreq, child); | ||
99 | |||
100 | return crypto_skcipher_decrypt(subreq); | ||
101 | } | ||
102 | |||
103 | static void simd_skcipher_exit(struct crypto_skcipher *tfm) | ||
104 | { | ||
105 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
106 | |||
107 | cryptd_free_skcipher(ctx->cryptd_tfm); | ||
108 | } | ||
109 | |||
110 | static int simd_skcipher_init(struct crypto_skcipher *tfm) | ||
111 | { | ||
112 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
113 | struct cryptd_skcipher *cryptd_tfm; | ||
114 | struct simd_skcipher_alg *salg; | ||
115 | struct skcipher_alg *alg; | ||
116 | unsigned reqsize; | ||
117 | |||
118 | alg = crypto_skcipher_alg(tfm); | ||
119 | salg = container_of(alg, struct simd_skcipher_alg, alg); | ||
120 | |||
121 | cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name, | ||
122 | CRYPTO_ALG_INTERNAL, | ||
123 | CRYPTO_ALG_INTERNAL); | ||
124 | if (IS_ERR(cryptd_tfm)) | ||
125 | return PTR_ERR(cryptd_tfm); | ||
126 | |||
127 | ctx->cryptd_tfm = cryptd_tfm; | ||
128 | |||
129 | reqsize = sizeof(struct skcipher_request); | ||
130 | reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); | ||
131 | |||
132 | crypto_skcipher_set_reqsize(tfm, reqsize); | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, | ||
138 | const char *drvname, | ||
139 | const char *basename) | ||
140 | { | ||
141 | struct simd_skcipher_alg *salg; | ||
142 | struct crypto_skcipher *tfm; | ||
143 | struct skcipher_alg *ialg; | ||
144 | struct skcipher_alg *alg; | ||
145 | int err; | ||
146 | |||
147 | tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, | ||
148 | CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); | ||
149 | if (IS_ERR(tfm)) | ||
150 | return ERR_CAST(tfm); | ||
151 | |||
152 | ialg = crypto_skcipher_alg(tfm); | ||
153 | |||
154 | salg = kzalloc(sizeof(*salg), GFP_KERNEL); | ||
155 | if (!salg) { | ||
156 | salg = ERR_PTR(-ENOMEM); | ||
157 | goto out_put_tfm; | ||
158 | } | ||
159 | |||
160 | salg->ialg_name = basename; | ||
161 | alg = &salg->alg; | ||
162 | |||
163 | err = -ENAMETOOLONG; | ||
164 | if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >= | ||
165 | CRYPTO_MAX_ALG_NAME) | ||
166 | goto out_free_salg; | ||
167 | |||
168 | if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
169 | drvname) >= CRYPTO_MAX_ALG_NAME) | ||
170 | goto out_free_salg; | ||
171 | |||
172 | alg->base.cra_flags = CRYPTO_ALG_ASYNC; | ||
173 | alg->base.cra_priority = ialg->base.cra_priority; | ||
174 | alg->base.cra_blocksize = ialg->base.cra_blocksize; | ||
175 | alg->base.cra_alignmask = ialg->base.cra_alignmask; | ||
176 | alg->base.cra_module = ialg->base.cra_module; | ||
177 | alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx); | ||
178 | |||
179 | alg->ivsize = ialg->ivsize; | ||
180 | alg->chunksize = ialg->chunksize; | ||
181 | alg->min_keysize = ialg->min_keysize; | ||
182 | alg->max_keysize = ialg->max_keysize; | ||
183 | |||
184 | alg->init = simd_skcipher_init; | ||
185 | alg->exit = simd_skcipher_exit; | ||
186 | |||
187 | alg->setkey = simd_skcipher_setkey; | ||
188 | alg->encrypt = simd_skcipher_encrypt; | ||
189 | alg->decrypt = simd_skcipher_decrypt; | ||
190 | |||
191 | err = crypto_register_skcipher(alg); | ||
192 | if (err) | ||
193 | goto out_free_salg; | ||
194 | |||
195 | out_put_tfm: | ||
196 | crypto_free_skcipher(tfm); | ||
197 | return salg; | ||
198 | |||
199 | out_free_salg: | ||
200 | kfree(salg); | ||
201 | salg = ERR_PTR(err); | ||
202 | goto out_put_tfm; | ||
203 | } | ||
204 | EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); | ||
205 | |||
206 | struct simd_skcipher_alg *simd_skcipher_create(const char *algname, | ||
207 | const char *basename) | ||
208 | { | ||
209 | char drvname[CRYPTO_MAX_ALG_NAME]; | ||
210 | |||
211 | if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= | ||
212 | CRYPTO_MAX_ALG_NAME) | ||
213 | return ERR_PTR(-ENAMETOOLONG); | ||
214 | |||
215 | return simd_skcipher_create_compat(algname, drvname, basename); | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(simd_skcipher_create); | ||
218 | |||
219 | void simd_skcipher_free(struct simd_skcipher_alg *salg) | ||
220 | { | ||
221 | crypto_unregister_skcipher(&salg->alg); | ||
222 | kfree(salg); | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(simd_skcipher_free); | ||
225 | |||
226 | MODULE_LICENSE("GPL"); | ||
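As a rough illustration of how the helper registered above is meant to be consumed (not part of this patch; the module and algorithm names below are invented), an architecture driver that has already registered a CRYPTO_ALG_INTERNAL skcipher could wrap it like this:

#include <linux/err.h>
#include <linux/module.h>
#include <crypto/internal/simd.h>

/* Hypothetical wrapper: assumes an internal skcipher named
 * "__ecb-aes-myarch" has already been registered by this module. */
static struct simd_skcipher_alg *myarch_simd_alg;

static int __init myarch_aes_init(void)
{
	myarch_simd_alg = simd_skcipher_create("ecb(aes)", "__ecb-aes-myarch");
	return PTR_ERR_OR_ZERO(myarch_simd_alg);
}

static void __exit myarch_aes_exit(void)
{
	simd_skcipher_free(myarch_simd_alg);
}

The resulting algorithm is registered as async and defers to cryptd whenever the SIMD unit is unavailable, which is exactly what simd_skcipher_init() above arranges.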
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index f7d0018dcaee..aca07c643d41 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -14,9 +14,12 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <crypto/internal/aead.h> | ||
17 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/scatterwalk.h> | ||
18 | #include <linux/bug.h> | 20 | #include <linux/bug.h> |
19 | #include <linux/cryptouser.h> | 21 | #include <linux/cryptouser.h> |
22 | #include <linux/list.h> | ||
20 | #include <linux/module.h> | 23 | #include <linux/module.h> |
21 | #include <linux/rtnetlink.h> | 24 | #include <linux/rtnetlink.h> |
22 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
@@ -24,6 +27,543 @@ | |||
24 | 27 | ||
25 | #include "internal.h" | 28 | #include "internal.h" |
26 | 29 | ||
30 | enum { | ||
31 | SKCIPHER_WALK_PHYS = 1 << 0, | ||
32 | SKCIPHER_WALK_SLOW = 1 << 1, | ||
33 | SKCIPHER_WALK_COPY = 1 << 2, | ||
34 | SKCIPHER_WALK_DIFF = 1 << 3, | ||
35 | SKCIPHER_WALK_SLEEP = 1 << 4, | ||
36 | }; | ||
37 | |||
38 | struct skcipher_walk_buffer { | ||
39 | struct list_head entry; | ||
40 | struct scatter_walk dst; | ||
41 | unsigned int len; | ||
42 | u8 *data; | ||
43 | u8 buffer[]; | ||
44 | }; | ||
45 | |||
46 | static int skcipher_walk_next(struct skcipher_walk *walk); | ||
47 | |||
48 | static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) | ||
49 | { | ||
50 | if (PageHighMem(scatterwalk_page(walk))) | ||
51 | kunmap_atomic(vaddr); | ||
52 | } | ||
53 | |||
54 | static inline void *skcipher_map(struct scatter_walk *walk) | ||
55 | { | ||
56 | struct page *page = scatterwalk_page(walk); | ||
57 | |||
58 | return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) + | ||
59 | offset_in_page(walk->offset); | ||
60 | } | ||
61 | |||
62 | static inline void skcipher_map_src(struct skcipher_walk *walk) | ||
63 | { | ||
64 | walk->src.virt.addr = skcipher_map(&walk->in); | ||
65 | } | ||
66 | |||
67 | static inline void skcipher_map_dst(struct skcipher_walk *walk) | ||
68 | { | ||
69 | walk->dst.virt.addr = skcipher_map(&walk->out); | ||
70 | } | ||
71 | |||
72 | static inline void skcipher_unmap_src(struct skcipher_walk *walk) | ||
73 | { | ||
74 | skcipher_unmap(&walk->in, walk->src.virt.addr); | ||
75 | } | ||
76 | |||
77 | static inline void skcipher_unmap_dst(struct skcipher_walk *walk) | ||
78 | { | ||
79 | skcipher_unmap(&walk->out, walk->dst.virt.addr); | ||
80 | } | ||
81 | |||
82 | static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) | ||
83 | { | ||
84 | return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; | ||
85 | } | ||
86 | |||
87 | /* Get a spot of the specified length that does not straddle a page. | ||
88 | * The caller needs to ensure that there is enough space for this operation. | ||
89 | */ | ||
90 | static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) | ||
91 | { | ||
92 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); | ||
93 | |||
94 | return max(start, end_page); | ||
95 | } | ||
96 | |||
97 | static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) | ||
98 | { | ||
99 | u8 *addr; | ||
100 | |||
101 | addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); | ||
102 | addr = skcipher_get_spot(addr, bsize); | ||
103 | scatterwalk_copychunks(addr, &walk->out, bsize, | ||
104 | (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | int skcipher_walk_done(struct skcipher_walk *walk, int err) | ||
109 | { | ||
110 | unsigned int n = walk->nbytes - err; | ||
111 | unsigned int nbytes; | ||
112 | |||
113 | nbytes = walk->total - n; | ||
114 | |||
115 | if (unlikely(err < 0)) { | ||
116 | nbytes = 0; | ||
117 | n = 0; | ||
118 | } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | | ||
119 | SKCIPHER_WALK_SLOW | | ||
120 | SKCIPHER_WALK_COPY | | ||
121 | SKCIPHER_WALK_DIFF)))) { | ||
122 | unmap_src: | ||
123 | skcipher_unmap_src(walk); | ||
124 | } else if (walk->flags & SKCIPHER_WALK_DIFF) { | ||
125 | skcipher_unmap_dst(walk); | ||
126 | goto unmap_src; | ||
127 | } else if (walk->flags & SKCIPHER_WALK_COPY) { | ||
128 | skcipher_map_dst(walk); | ||
129 | memcpy(walk->dst.virt.addr, walk->page, n); | ||
130 | skcipher_unmap_dst(walk); | ||
131 | } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { | ||
132 | if (WARN_ON(err)) { | ||
133 | err = -EINVAL; | ||
134 | nbytes = 0; | ||
135 | } else | ||
136 | n = skcipher_done_slow(walk, n); | ||
137 | } | ||
138 | |||
139 | if (err > 0) | ||
140 | err = 0; | ||
141 | |||
142 | walk->total = nbytes; | ||
143 | walk->nbytes = nbytes; | ||
144 | |||
145 | scatterwalk_advance(&walk->in, n); | ||
146 | scatterwalk_advance(&walk->out, n); | ||
147 | scatterwalk_done(&walk->in, 0, nbytes); | ||
148 | scatterwalk_done(&walk->out, 1, nbytes); | ||
149 | |||
150 | if (nbytes) { | ||
151 | crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? | ||
152 | CRYPTO_TFM_REQ_MAY_SLEEP : 0); | ||
153 | return skcipher_walk_next(walk); | ||
154 | } | ||
155 | |||
156 | /* Short-circuit for the common/fast path. */ | ||
157 | if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) | ||
158 | goto out; | ||
159 | |||
160 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
161 | goto out; | ||
162 | |||
163 | if (walk->iv != walk->oiv) | ||
164 | memcpy(walk->oiv, walk->iv, walk->ivsize); | ||
165 | if (walk->buffer != walk->page) | ||
166 | kfree(walk->buffer); | ||
167 | if (walk->page) | ||
168 | free_page((unsigned long)walk->page); | ||
169 | |||
170 | out: | ||
171 | return err; | ||
172 | } | ||
173 | EXPORT_SYMBOL_GPL(skcipher_walk_done); | ||
174 | |||
175 | void skcipher_walk_complete(struct skcipher_walk *walk, int err) | ||
176 | { | ||
177 | struct skcipher_walk_buffer *p, *tmp; | ||
178 | |||
179 | list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { | ||
180 | u8 *data; | ||
181 | |||
182 | if (err) | ||
183 | goto done; | ||
184 | |||
185 | data = p->data; | ||
186 | if (!data) { | ||
187 | data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); | ||
188 | data = skcipher_get_spot(data, walk->chunksize); | ||
189 | } | ||
190 | |||
191 | scatterwalk_copychunks(data, &p->dst, p->len, 1); | ||
192 | |||
193 | if (offset_in_page(p->data) + p->len + walk->chunksize > | ||
194 | PAGE_SIZE) | ||
195 | free_page((unsigned long)p->data); | ||
196 | |||
197 | done: | ||
198 | list_del(&p->entry); | ||
199 | kfree(p); | ||
200 | } | ||
201 | |||
202 | if (!err && walk->iv != walk->oiv) | ||
203 | memcpy(walk->oiv, walk->iv, walk->ivsize); | ||
204 | if (walk->buffer != walk->page) | ||
205 | kfree(walk->buffer); | ||
206 | if (walk->page) | ||
207 | free_page((unsigned long)walk->page); | ||
208 | } | ||
209 | EXPORT_SYMBOL_GPL(skcipher_walk_complete); | ||
210 | |||
211 | static void skcipher_queue_write(struct skcipher_walk *walk, | ||
212 | struct skcipher_walk_buffer *p) | ||
213 | { | ||
214 | p->dst = walk->out; | ||
215 | list_add_tail(&p->entry, &walk->buffers); | ||
216 | } | ||
217 | |||
218 | static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) | ||
219 | { | ||
220 | bool phys = walk->flags & SKCIPHER_WALK_PHYS; | ||
221 | unsigned alignmask = walk->alignmask; | ||
222 | struct skcipher_walk_buffer *p; | ||
223 | unsigned a; | ||
224 | unsigned n; | ||
225 | u8 *buffer; | ||
226 | void *v; | ||
227 | |||
228 | if (!phys) { | ||
229 | buffer = walk->buffer ?: walk->page; | ||
230 | if (buffer) | ||
231 | goto ok; | ||
232 | } | ||
233 | |||
234 | /* Start with the minimum alignment of kmalloc. */ | ||
235 | a = crypto_tfm_ctx_alignment() - 1; | ||
236 | n = bsize; | ||
237 | |||
238 | if (phys) { | ||
239 | /* Calculate the minimum alignment of p->buffer. */ | ||
240 | a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; | ||
241 | n += sizeof(*p); | ||
242 | } | ||
243 | |||
244 | /* Minimum size to align p->buffer by alignmask. */ | ||
245 | n += alignmask & ~a; | ||
246 | |||
247 | /* Minimum size to ensure p->buffer does not straddle a page. */ | ||
248 | n += (bsize - 1) & ~(alignmask | a); | ||
249 | |||
250 | v = kzalloc(n, skcipher_walk_gfp(walk)); | ||
251 | if (!v) | ||
252 | return skcipher_walk_done(walk, -ENOMEM); | ||
253 | |||
254 | if (phys) { | ||
255 | p = v; | ||
256 | p->len = bsize; | ||
257 | skcipher_queue_write(walk, p); | ||
258 | buffer = p->buffer; | ||
259 | } else { | ||
260 | walk->buffer = v; | ||
261 | buffer = v; | ||
262 | } | ||
263 | |||
264 | ok: | ||
265 | walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); | ||
266 | walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); | ||
267 | walk->src.virt.addr = walk->dst.virt.addr; | ||
268 | |||
269 | scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); | ||
270 | |||
271 | walk->nbytes = bsize; | ||
272 | walk->flags |= SKCIPHER_WALK_SLOW; | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static int skcipher_next_copy(struct skcipher_walk *walk) | ||
278 | { | ||
279 | struct skcipher_walk_buffer *p; | ||
280 | u8 *tmp = walk->page; | ||
281 | |||
282 | skcipher_map_src(walk); | ||
283 | memcpy(tmp, walk->src.virt.addr, walk->nbytes); | ||
284 | skcipher_unmap_src(walk); | ||
285 | |||
286 | walk->src.virt.addr = tmp; | ||
287 | walk->dst.virt.addr = tmp; | ||
288 | |||
289 | if (!(walk->flags & SKCIPHER_WALK_PHYS)) | ||
290 | return 0; | ||
291 | |||
292 | p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); | ||
293 | if (!p) | ||
294 | return -ENOMEM; | ||
295 | |||
296 | p->data = walk->page; | ||
297 | p->len = walk->nbytes; | ||
298 | skcipher_queue_write(walk, p); | ||
299 | |||
300 | if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize > | ||
301 | PAGE_SIZE) | ||
302 | walk->page = NULL; | ||
303 | else | ||
304 | walk->page += walk->nbytes; | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int skcipher_next_fast(struct skcipher_walk *walk) | ||
310 | { | ||
311 | unsigned long diff; | ||
312 | |||
313 | walk->src.phys.page = scatterwalk_page(&walk->in); | ||
314 | walk->src.phys.offset = offset_in_page(walk->in.offset); | ||
315 | walk->dst.phys.page = scatterwalk_page(&walk->out); | ||
316 | walk->dst.phys.offset = offset_in_page(walk->out.offset); | ||
317 | |||
318 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
319 | return 0; | ||
320 | |||
321 | diff = walk->src.phys.offset - walk->dst.phys.offset; | ||
322 | diff |= walk->src.virt.page - walk->dst.virt.page; | ||
323 | |||
324 | skcipher_map_src(walk); | ||
325 | walk->dst.virt.addr = walk->src.virt.addr; | ||
326 | |||
327 | if (diff) { | ||
328 | walk->flags |= SKCIPHER_WALK_DIFF; | ||
329 | skcipher_map_dst(walk); | ||
330 | } | ||
331 | |||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static int skcipher_walk_next(struct skcipher_walk *walk) | ||
336 | { | ||
337 | unsigned int bsize; | ||
338 | unsigned int n; | ||
339 | int err; | ||
340 | |||
341 | walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | | ||
342 | SKCIPHER_WALK_DIFF); | ||
343 | |||
344 | n = walk->total; | ||
345 | bsize = min(walk->chunksize, max(n, walk->blocksize)); | ||
346 | n = scatterwalk_clamp(&walk->in, n); | ||
347 | n = scatterwalk_clamp(&walk->out, n); | ||
348 | |||
349 | if (unlikely(n < bsize)) { | ||
350 | if (unlikely(walk->total < walk->blocksize)) | ||
351 | return skcipher_walk_done(walk, -EINVAL); | ||
352 | |||
353 | slow_path: | ||
354 | err = skcipher_next_slow(walk, bsize); | ||
355 | goto set_phys_lowmem; | ||
356 | } | ||
357 | |||
358 | if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { | ||
359 | if (!walk->page) { | ||
360 | gfp_t gfp = skcipher_walk_gfp(walk); | ||
361 | |||
362 | walk->page = (void *)__get_free_page(gfp); | ||
363 | if (!walk->page) | ||
364 | goto slow_path; | ||
365 | } | ||
366 | |||
367 | walk->nbytes = min_t(unsigned, n, | ||
368 | PAGE_SIZE - offset_in_page(walk->page)); | ||
369 | walk->flags |= SKCIPHER_WALK_COPY; | ||
370 | err = skcipher_next_copy(walk); | ||
371 | goto set_phys_lowmem; | ||
372 | } | ||
373 | |||
374 | walk->nbytes = n; | ||
375 | |||
376 | return skcipher_next_fast(walk); | ||
377 | |||
378 | set_phys_lowmem: | ||
379 | if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { | ||
380 | walk->src.phys.page = virt_to_page(walk->src.virt.addr); | ||
381 | walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); | ||
382 | walk->src.phys.offset &= PAGE_SIZE - 1; | ||
383 | walk->dst.phys.offset &= PAGE_SIZE - 1; | ||
384 | } | ||
385 | return err; | ||
386 | } | ||
388 | |||
389 | static int skcipher_copy_iv(struct skcipher_walk *walk) | ||
390 | { | ||
391 | unsigned a = crypto_tfm_ctx_alignment() - 1; | ||
392 | unsigned alignmask = walk->alignmask; | ||
393 | unsigned ivsize = walk->ivsize; | ||
394 | unsigned bs = walk->chunksize; | ||
395 | unsigned aligned_bs; | ||
396 | unsigned size; | ||
397 | u8 *iv; | ||
398 | |||
399 | aligned_bs = ALIGN(bs, alignmask); | ||
400 | |||
401 | /* Minimum size to align buffer by alignmask. */ | ||
402 | size = alignmask & ~a; | ||
403 | |||
404 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
405 | size += ivsize; | ||
406 | else { | ||
407 | size += aligned_bs + ivsize; | ||
408 | |||
409 | /* Minimum size to ensure buffer does not straddle a page. */ | ||
410 | size += (bs - 1) & ~(alignmask | a); | ||
411 | } | ||
412 | |||
413 | walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); | ||
414 | if (!walk->buffer) | ||
415 | return -ENOMEM; | ||
416 | |||
417 | iv = PTR_ALIGN(walk->buffer, alignmask + 1); | ||
418 | iv = skcipher_get_spot(iv, bs) + aligned_bs; | ||
419 | |||
420 | walk->iv = memcpy(iv, walk->iv, walk->ivsize); | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int skcipher_walk_first(struct skcipher_walk *walk) | ||
425 | { | ||
426 | walk->nbytes = 0; | ||
427 | |||
428 | if (WARN_ON_ONCE(in_irq())) | ||
429 | return -EDEADLK; | ||
430 | |||
431 | if (unlikely(!walk->total)) | ||
432 | return 0; | ||
433 | |||
434 | walk->buffer = NULL; | ||
435 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | ||
436 | int err = skcipher_copy_iv(walk); | ||
437 | if (err) | ||
438 | return err; | ||
439 | } | ||
440 | |||
441 | walk->page = NULL; | ||
442 | walk->nbytes = walk->total; | ||
443 | |||
444 | return skcipher_walk_next(walk); | ||
445 | } | ||
446 | |||
447 | static int skcipher_walk_skcipher(struct skcipher_walk *walk, | ||
448 | struct skcipher_request *req) | ||
449 | { | ||
450 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
451 | |||
452 | scatterwalk_start(&walk->in, req->src); | ||
453 | scatterwalk_start(&walk->out, req->dst); | ||
454 | |||
455 | walk->total = req->cryptlen; | ||
456 | walk->iv = req->iv; | ||
457 | walk->oiv = req->iv; | ||
458 | |||
459 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
460 | walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
461 | SKCIPHER_WALK_SLEEP : 0; | ||
462 | |||
463 | walk->blocksize = crypto_skcipher_blocksize(tfm); | ||
464 | walk->chunksize = crypto_skcipher_chunksize(tfm); | ||
465 | walk->ivsize = crypto_skcipher_ivsize(tfm); | ||
466 | walk->alignmask = crypto_skcipher_alignmask(tfm); | ||
467 | |||
468 | return skcipher_walk_first(walk); | ||
469 | } | ||
470 | |||
471 | int skcipher_walk_virt(struct skcipher_walk *walk, | ||
472 | struct skcipher_request *req, bool atomic) | ||
473 | { | ||
474 | int err; | ||
475 | |||
476 | walk->flags &= ~SKCIPHER_WALK_PHYS; | ||
477 | |||
478 | err = skcipher_walk_skcipher(walk, req); | ||
479 | |||
480 | walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0; | ||
481 | |||
482 | return err; | ||
483 | } | ||
484 | EXPORT_SYMBOL_GPL(skcipher_walk_virt); | ||
485 | |||
486 | void skcipher_walk_atomise(struct skcipher_walk *walk) | ||
487 | { | ||
488 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
489 | } | ||
490 | EXPORT_SYMBOL_GPL(skcipher_walk_atomise); | ||
491 | |||
492 | int skcipher_walk_async(struct skcipher_walk *walk, | ||
493 | struct skcipher_request *req) | ||
494 | { | ||
495 | walk->flags |= SKCIPHER_WALK_PHYS; | ||
496 | |||
497 | INIT_LIST_HEAD(&walk->buffers); | ||
498 | |||
499 | return skcipher_walk_skcipher(walk, req); | ||
500 | } | ||
501 | EXPORT_SYMBOL_GPL(skcipher_walk_async); | ||
502 | |||
503 | static int skcipher_walk_aead_common(struct skcipher_walk *walk, | ||
504 | struct aead_request *req, bool atomic) | ||
505 | { | ||
506 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
507 | int err; | ||
508 | |||
509 | walk->flags &= ~SKCIPHER_WALK_PHYS; | ||
510 | |||
511 | scatterwalk_start(&walk->in, req->src); | ||
512 | scatterwalk_start(&walk->out, req->dst); | ||
513 | |||
514 | scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); | ||
515 | scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); | ||
516 | |||
517 | walk->iv = req->iv; | ||
518 | walk->oiv = req->iv; | ||
519 | |||
520 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) | ||
521 | walk->flags |= SKCIPHER_WALK_SLEEP; | ||
522 | else | ||
523 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
524 | |||
525 | walk->blocksize = crypto_aead_blocksize(tfm); | ||
526 | walk->chunksize = crypto_aead_chunksize(tfm); | ||
527 | walk->ivsize = crypto_aead_ivsize(tfm); | ||
528 | walk->alignmask = crypto_aead_alignmask(tfm); | ||
529 | |||
530 | err = skcipher_walk_first(walk); | ||
531 | |||
532 | if (atomic) | ||
533 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
534 | |||
535 | return err; | ||
536 | } | ||
537 | |||
538 | int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, | ||
539 | bool atomic) | ||
540 | { | ||
541 | walk->total = req->cryptlen; | ||
542 | |||
543 | return skcipher_walk_aead_common(walk, req, atomic); | ||
544 | } | ||
545 | EXPORT_SYMBOL_GPL(skcipher_walk_aead); | ||
546 | |||
547 | int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, | ||
548 | struct aead_request *req, bool atomic) | ||
549 | { | ||
550 | walk->total = req->cryptlen; | ||
551 | |||
552 | return skcipher_walk_aead_common(walk, req, atomic); | ||
553 | } | ||
554 | EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); | ||
555 | |||
556 | int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, | ||
557 | struct aead_request *req, bool atomic) | ||
558 | { | ||
559 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
560 | |||
561 | walk->total = req->cryptlen - crypto_aead_authsize(tfm); | ||
562 | |||
563 | return skcipher_walk_aead_common(walk, req, atomic); | ||
564 | } | ||
565 | EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); | ||
566 | |||
27 | static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) | 567 | static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) |
28 | { | 568 | { |
29 | if (alg->cra_type == &crypto_blkcipher_type) | 569 | if (alg->cra_type == &crypto_blkcipher_type) |
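For orientation, the consumer pattern the new walk interface above is designed for looks roughly like the sketch below; this is not code from this commit, and the cipher body is only a placeholder:

#include <linux/string.h>
#include <crypto/internal/skcipher.h>

/* Sketch of a driver's encrypt handler using the new walker. */
static int example_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int n = walk.nbytes;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* A real implementation transforms n bytes here;
		 * the copy merely stands in for the cipher. */
		if (dst != src)
			memcpy(dst, src, n);

		/* Second argument is the number of bytes left unprocessed. */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

skcipher_walk_virt() handles the mapping, alignment and page-straddling cases shown above, so the driver only ever sees contiguous virtual buffers.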
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 62dffa0028ac..f616ad74cce7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <crypto/drbg.h> | 33 | #include <crypto/drbg.h> |
34 | #include <crypto/akcipher.h> | 34 | #include <crypto/akcipher.h> |
35 | #include <crypto/kpp.h> | 35 | #include <crypto/kpp.h> |
36 | #include <crypto/acompress.h> | ||
36 | 37 | ||
37 | #include "internal.h" | 38 | #include "internal.h" |
38 | 39 | ||
@@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
62 | */ | 63 | */ |
63 | #define IDX1 32 | 64 | #define IDX1 32 |
64 | #define IDX2 32400 | 65 | #define IDX2 32400 |
65 | #define IDX3 1 | 66 | #define IDX3 1511 |
66 | #define IDX4 8193 | 67 | #define IDX4 8193 |
67 | #define IDX5 22222 | 68 | #define IDX5 22222 |
68 | #define IDX6 17101 | 69 | #define IDX6 17101 |
@@ -1442,6 +1443,126 @@ out: | |||
1442 | return ret; | 1443 | return ret; |
1443 | } | 1444 | } |
1444 | 1445 | ||
1446 | static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, | ||
1447 | struct comp_testvec *dtemplate, int ctcount, int dtcount) | ||
1448 | { | ||
1449 | const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); | ||
1450 | unsigned int i; | ||
1451 | char *output; | ||
1452 | int ret; | ||
1453 | struct scatterlist src, dst; | ||
1454 | struct acomp_req *req; | ||
1455 | struct tcrypt_result result; | ||
1456 | |||
1457 | output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); | ||
1458 | if (!output) | ||
1459 | return -ENOMEM; | ||
1460 | |||
1461 | for (i = 0; i < ctcount; i++) { | ||
1462 | unsigned int dlen = COMP_BUF_SIZE; | ||
1463 | int ilen = ctemplate[i].inlen; | ||
1464 | |||
1465 | memset(output, 0, dlen); | ||
1466 | init_completion(&result.completion); | ||
1467 | sg_init_one(&src, ctemplate[i].input, ilen); | ||
1468 | sg_init_one(&dst, output, dlen); | ||
1469 | |||
1470 | req = acomp_request_alloc(tfm); | ||
1471 | if (!req) { | ||
1472 | pr_err("alg: acomp: request alloc failed for %s\n", | ||
1473 | algo); | ||
1474 | ret = -ENOMEM; | ||
1475 | goto out; | ||
1476 | } | ||
1477 | |||
1478 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | ||
1479 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1480 | tcrypt_complete, &result); | ||
1481 | |||
1482 | ret = wait_async_op(&result, crypto_acomp_compress(req)); | ||
1483 | if (ret) { | ||
1484 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", | ||
1485 | i + 1, algo, -ret); | ||
1486 | acomp_request_free(req); | ||
1487 | goto out; | ||
1488 | } | ||
1489 | |||
1490 | if (req->dlen != ctemplate[i].outlen) { | ||
1491 | pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", | ||
1492 | i + 1, algo, req->dlen); | ||
1493 | ret = -EINVAL; | ||
1494 | acomp_request_free(req); | ||
1495 | goto out; | ||
1496 | } | ||
1497 | |||
1498 | if (memcmp(output, ctemplate[i].output, req->dlen)) { | ||
1499 | pr_err("alg: acomp: Compression test %d failed for %s\n", | ||
1500 | i + 1, algo); | ||
1501 | hexdump(output, req->dlen); | ||
1502 | ret = -EINVAL; | ||
1503 | acomp_request_free(req); | ||
1504 | goto out; | ||
1505 | } | ||
1506 | |||
1507 | acomp_request_free(req); | ||
1508 | } | ||
1509 | |||
1510 | for (i = 0; i < dtcount; i++) { | ||
1511 | unsigned int dlen = COMP_BUF_SIZE; | ||
1512 | int ilen = dtemplate[i].inlen; | ||
1513 | |||
1514 | memset(output, 0, dlen); | ||
1515 | init_completion(&result.completion); | ||
1516 | sg_init_one(&src, dtemplate[i].input, ilen); | ||
1517 | sg_init_one(&dst, output, dlen); | ||
1518 | |||
1519 | req = acomp_request_alloc(tfm); | ||
1520 | if (!req) { | ||
1521 | pr_err("alg: acomp: request alloc failed for %s\n", | ||
1522 | algo); | ||
1523 | ret = -ENOMEM; | ||
1524 | goto out; | ||
1525 | } | ||
1526 | |||
1527 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | ||
1528 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1529 | tcrypt_complete, &result); | ||
1530 | |||
1531 | ret = wait_async_op(&result, crypto_acomp_decompress(req)); | ||
1532 | if (ret) { | ||
1533 | pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", | ||
1534 | i + 1, algo, -ret); | ||
1535 | acomp_request_free(req); | ||
1536 | goto out; | ||
1537 | } | ||
1538 | |||
1539 | if (req->dlen != dtemplate[i].outlen) { | ||
1540 | pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", | ||
1541 | i + 1, algo, req->dlen); | ||
1542 | ret = -EINVAL; | ||
1543 | acomp_request_free(req); | ||
1544 | goto out; | ||
1545 | } | ||
1546 | |||
1547 | if (memcmp(output, dtemplate[i].output, req->dlen)) { | ||
1548 | pr_err("alg: acomp: Decompression test %d failed for %s\n", | ||
1549 | i + 1, algo); | ||
1550 | hexdump(output, req->dlen); | ||
1551 | ret = -EINVAL; | ||
1552 | acomp_request_free(req); | ||
1553 | goto out; | ||
1554 | } | ||
1555 | |||
1556 | acomp_request_free(req); | ||
1557 | } | ||
1558 | |||
1559 | ret = 0; | ||
1560 | |||
1561 | out: | ||
1562 | kfree(output); | ||
1563 | return ret; | ||
1564 | } | ||
1565 | |||
1445 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | 1566 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, |
1446 | unsigned int tcount) | 1567 | unsigned int tcount) |
1447 | { | 1568 | { |
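Outside the test harness, the calling pattern that the test_acomp() helper added above exercises would look roughly like this sketch (synchronous case only; the algorithm name "lz4" is just an example):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

/* Sketch only: assumes a synchronous acomp implementation; a real caller
 * would wait for completion the way test_acomp() does via wait_async_op(). */
static int example_compress(const void *in, unsigned int ilen,
			    void *out, unsigned int *olen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int err;

	tfm = crypto_alloc_acomp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, in, ilen);
	sg_init_one(&dst, out, *olen);
	acomp_request_set_params(req, &src, &dst, ilen, *olen);

	err = crypto_acomp_compress(req);	/* may return -EINPROGRESS */
	if (!err)
		*olen = req->dlen;

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}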
@@ -1509,7 +1630,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, | |||
1509 | struct crypto_aead *tfm; | 1630 | struct crypto_aead *tfm; |
1510 | int err = 0; | 1631 | int err = 0; |
1511 | 1632 | ||
1512 | tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1633 | tfm = crypto_alloc_aead(driver, type, mask); |
1513 | if (IS_ERR(tfm)) { | 1634 | if (IS_ERR(tfm)) { |
1514 | printk(KERN_ERR "alg: aead: Failed to load transform for %s: " | 1635 | printk(KERN_ERR "alg: aead: Failed to load transform for %s: " |
1515 | "%ld\n", driver, PTR_ERR(tfm)); | 1636 | "%ld\n", driver, PTR_ERR(tfm)); |
@@ -1538,7 +1659,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc, | |||
1538 | struct crypto_cipher *tfm; | 1659 | struct crypto_cipher *tfm; |
1539 | int err = 0; | 1660 | int err = 0; |
1540 | 1661 | ||
1541 | tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1662 | tfm = crypto_alloc_cipher(driver, type, mask); |
1542 | if (IS_ERR(tfm)) { | 1663 | if (IS_ERR(tfm)) { |
1543 | printk(KERN_ERR "alg: cipher: Failed to load transform for " | 1664 | printk(KERN_ERR "alg: cipher: Failed to load transform for " |
1544 | "%s: %ld\n", driver, PTR_ERR(tfm)); | 1665 | "%s: %ld\n", driver, PTR_ERR(tfm)); |
@@ -1567,7 +1688,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, | |||
1567 | struct crypto_skcipher *tfm; | 1688 | struct crypto_skcipher *tfm; |
1568 | int err = 0; | 1689 | int err = 0; |
1569 | 1690 | ||
1570 | tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1691 | tfm = crypto_alloc_skcipher(driver, type, mask); |
1571 | if (IS_ERR(tfm)) { | 1692 | if (IS_ERR(tfm)) { |
1572 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " | 1693 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " |
1573 | "%s: %ld\n", driver, PTR_ERR(tfm)); | 1694 | "%s: %ld\n", driver, PTR_ERR(tfm)); |
@@ -1593,22 +1714,38 @@ out: | |||
1593 | static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, | 1714 | static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, |
1594 | u32 type, u32 mask) | 1715 | u32 type, u32 mask) |
1595 | { | 1716 | { |
1596 | struct crypto_comp *tfm; | 1717 | struct crypto_comp *comp; |
1718 | struct crypto_acomp *acomp; | ||
1597 | int err; | 1719 | int err; |
1720 | u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; | ||
1721 | |||
1722 | if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { | ||
1723 | acomp = crypto_alloc_acomp(driver, type, mask); | ||
1724 | if (IS_ERR(acomp)) { | ||
1725 | pr_err("alg: acomp: Failed to load transform for %s: %ld\n", | ||
1726 | driver, PTR_ERR(acomp)); | ||
1727 | return PTR_ERR(acomp); | ||
1728 | } | ||
1729 | err = test_acomp(acomp, desc->suite.comp.comp.vecs, | ||
1730 | desc->suite.comp.decomp.vecs, | ||
1731 | desc->suite.comp.comp.count, | ||
1732 | desc->suite.comp.decomp.count); | ||
1733 | crypto_free_acomp(acomp); | ||
1734 | } else { | ||
1735 | comp = crypto_alloc_comp(driver, type, mask); | ||
1736 | if (IS_ERR(comp)) { | ||
1737 | pr_err("alg: comp: Failed to load transform for %s: %ld\n", | ||
1738 | driver, PTR_ERR(comp)); | ||
1739 | return PTR_ERR(comp); | ||
1740 | } | ||
1598 | 1741 | ||
1599 | tfm = crypto_alloc_comp(driver, type, mask); | 1742 | err = test_comp(comp, desc->suite.comp.comp.vecs, |
1600 | if (IS_ERR(tfm)) { | 1743 | desc->suite.comp.decomp.vecs, |
1601 | printk(KERN_ERR "alg: comp: Failed to load transform for %s: " | 1744 | desc->suite.comp.comp.count, |
1602 | "%ld\n", driver, PTR_ERR(tfm)); | 1745 | desc->suite.comp.decomp.count); |
1603 | return PTR_ERR(tfm); | ||
1604 | } | ||
1605 | |||
1606 | err = test_comp(tfm, desc->suite.comp.comp.vecs, | ||
1607 | desc->suite.comp.decomp.vecs, | ||
1608 | desc->suite.comp.comp.count, | ||
1609 | desc->suite.comp.decomp.count); | ||
1610 | 1746 | ||
1611 | crypto_free_comp(tfm); | 1747 | crypto_free_comp(comp); |
1748 | } | ||
1612 | return err; | 1749 | return err; |
1613 | } | 1750 | } |
1614 | 1751 | ||
@@ -1618,7 +1755,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, | |||
1618 | struct crypto_ahash *tfm; | 1755 | struct crypto_ahash *tfm; |
1619 | int err; | 1756 | int err; |
1620 | 1757 | ||
1621 | tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1758 | tfm = crypto_alloc_ahash(driver, type, mask); |
1622 | if (IS_ERR(tfm)) { | 1759 | if (IS_ERR(tfm)) { |
1623 | printk(KERN_ERR "alg: hash: Failed to load transform for %s: " | 1760 | printk(KERN_ERR "alg: hash: Failed to load transform for %s: " |
1624 | "%ld\n", driver, PTR_ERR(tfm)); | 1761 | "%ld\n", driver, PTR_ERR(tfm)); |
@@ -1646,7 +1783,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, | |||
1646 | if (err) | 1783 | if (err) |
1647 | goto out; | 1784 | goto out; |
1648 | 1785 | ||
1649 | tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1786 | tfm = crypto_alloc_shash(driver, type, mask); |
1650 | if (IS_ERR(tfm)) { | 1787 | if (IS_ERR(tfm)) { |
1651 | printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " | 1788 | printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " |
1652 | "%ld\n", driver, PTR_ERR(tfm)); | 1789 | "%ld\n", driver, PTR_ERR(tfm)); |
@@ -1688,7 +1825,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, | |||
1688 | struct crypto_rng *rng; | 1825 | struct crypto_rng *rng; |
1689 | int err; | 1826 | int err; |
1690 | 1827 | ||
1691 | rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1828 | rng = crypto_alloc_rng(driver, type, mask); |
1692 | if (IS_ERR(rng)) { | 1829 | if (IS_ERR(rng)) { |
1693 | printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " | 1830 | printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " |
1694 | "%ld\n", driver, PTR_ERR(rng)); | 1831 | "%ld\n", driver, PTR_ERR(rng)); |
@@ -1715,7 +1852,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr, | |||
1715 | if (!buf) | 1852 | if (!buf) |
1716 | return -ENOMEM; | 1853 | return -ENOMEM; |
1717 | 1854 | ||
1718 | drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1855 | drng = crypto_alloc_rng(driver, type, mask); |
1719 | if (IS_ERR(drng)) { | 1856 | if (IS_ERR(drng)) { |
1720 | printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " | 1857 | printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " |
1721 | "%s\n", driver); | 1858 | "%s\n", driver); |
@@ -1909,7 +2046,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, | |||
1909 | struct crypto_kpp *tfm; | 2046 | struct crypto_kpp *tfm; |
1910 | int err = 0; | 2047 | int err = 0; |
1911 | 2048 | ||
1912 | tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask); | 2049 | tfm = crypto_alloc_kpp(driver, type, mask); |
1913 | if (IS_ERR(tfm)) { | 2050 | if (IS_ERR(tfm)) { |
1914 | pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", | 2051 | pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", |
1915 | driver, PTR_ERR(tfm)); | 2052 | driver, PTR_ERR(tfm)); |
@@ -2068,7 +2205,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, | |||
2068 | struct crypto_akcipher *tfm; | 2205 | struct crypto_akcipher *tfm; |
2069 | int err = 0; | 2206 | int err = 0; |
2070 | 2207 | ||
2071 | tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 2208 | tfm = crypto_alloc_akcipher(driver, type, mask); |
2072 | if (IS_ERR(tfm)) { | 2209 | if (IS_ERR(tfm)) { |
2073 | pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", | 2210 | pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", |
2074 | driver, PTR_ERR(tfm)); | 2211 | driver, PTR_ERR(tfm)); |
@@ -2091,88 +2228,6 @@ static int alg_test_null(const struct alg_test_desc *desc, | |||
2091 | /* Please keep this list sorted by algorithm name. */ | 2228 | /* Please keep this list sorted by algorithm name. */ |
2092 | static const struct alg_test_desc alg_test_descs[] = { | 2229 | static const struct alg_test_desc alg_test_descs[] = { |
2093 | { | 2230 | { |
2094 | .alg = "__cbc-cast5-avx", | ||
2095 | .test = alg_test_null, | ||
2096 | }, { | ||
2097 | .alg = "__cbc-cast6-avx", | ||
2098 | .test = alg_test_null, | ||
2099 | }, { | ||
2100 | .alg = "__cbc-serpent-avx", | ||
2101 | .test = alg_test_null, | ||
2102 | }, { | ||
2103 | .alg = "__cbc-serpent-avx2", | ||
2104 | .test = alg_test_null, | ||
2105 | }, { | ||
2106 | .alg = "__cbc-serpent-sse2", | ||
2107 | .test = alg_test_null, | ||
2108 | }, { | ||
2109 | .alg = "__cbc-twofish-avx", | ||
2110 | .test = alg_test_null, | ||
2111 | }, { | ||
2112 | .alg = "__driver-cbc-aes-aesni", | ||
2113 | .test = alg_test_null, | ||
2114 | .fips_allowed = 1, | ||
2115 | }, { | ||
2116 | .alg = "__driver-cbc-camellia-aesni", | ||
2117 | .test = alg_test_null, | ||
2118 | }, { | ||
2119 | .alg = "__driver-cbc-camellia-aesni-avx2", | ||
2120 | .test = alg_test_null, | ||
2121 | }, { | ||
2122 | .alg = "__driver-cbc-cast5-avx", | ||
2123 | .test = alg_test_null, | ||
2124 | }, { | ||
2125 | .alg = "__driver-cbc-cast6-avx", | ||
2126 | .test = alg_test_null, | ||
2127 | }, { | ||
2128 | .alg = "__driver-cbc-serpent-avx", | ||
2129 | .test = alg_test_null, | ||
2130 | }, { | ||
2131 | .alg = "__driver-cbc-serpent-avx2", | ||
2132 | .test = alg_test_null, | ||
2133 | }, { | ||
2134 | .alg = "__driver-cbc-serpent-sse2", | ||
2135 | .test = alg_test_null, | ||
2136 | }, { | ||
2137 | .alg = "__driver-cbc-twofish-avx", | ||
2138 | .test = alg_test_null, | ||
2139 | }, { | ||
2140 | .alg = "__driver-ecb-aes-aesni", | ||
2141 | .test = alg_test_null, | ||
2142 | .fips_allowed = 1, | ||
2143 | }, { | ||
2144 | .alg = "__driver-ecb-camellia-aesni", | ||
2145 | .test = alg_test_null, | ||
2146 | }, { | ||
2147 | .alg = "__driver-ecb-camellia-aesni-avx2", | ||
2148 | .test = alg_test_null, | ||
2149 | }, { | ||
2150 | .alg = "__driver-ecb-cast5-avx", | ||
2151 | .test = alg_test_null, | ||
2152 | }, { | ||
2153 | .alg = "__driver-ecb-cast6-avx", | ||
2154 | .test = alg_test_null, | ||
2155 | }, { | ||
2156 | .alg = "__driver-ecb-serpent-avx", | ||
2157 | .test = alg_test_null, | ||
2158 | }, { | ||
2159 | .alg = "__driver-ecb-serpent-avx2", | ||
2160 | .test = alg_test_null, | ||
2161 | }, { | ||
2162 | .alg = "__driver-ecb-serpent-sse2", | ||
2163 | .test = alg_test_null, | ||
2164 | }, { | ||
2165 | .alg = "__driver-ecb-twofish-avx", | ||
2166 | .test = alg_test_null, | ||
2167 | }, { | ||
2168 | .alg = "__driver-gcm-aes-aesni", | ||
2169 | .test = alg_test_null, | ||
2170 | .fips_allowed = 1, | ||
2171 | }, { | ||
2172 | .alg = "__ghash-pclmulqdqni", | ||
2173 | .test = alg_test_null, | ||
2174 | .fips_allowed = 1, | ||
2175 | }, { | ||
2176 | .alg = "ansi_cprng", | 2231 | .alg = "ansi_cprng", |
2177 | .test = alg_test_cprng, | 2232 | .test = alg_test_cprng, |
2178 | .suite = { | 2233 | .suite = { |
@@ -2659,55 +2714,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2659 | } | 2714 | } |
2660 | } | 2715 | } |
2661 | }, { | 2716 | }, { |
2662 | .alg = "cryptd(__driver-cbc-aes-aesni)", | ||
2663 | .test = alg_test_null, | ||
2664 | .fips_allowed = 1, | ||
2665 | }, { | ||
2666 | .alg = "cryptd(__driver-cbc-camellia-aesni)", | ||
2667 | .test = alg_test_null, | ||
2668 | }, { | ||
2669 | .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)", | ||
2670 | .test = alg_test_null, | ||
2671 | }, { | ||
2672 | .alg = "cryptd(__driver-cbc-serpent-avx2)", | ||
2673 | .test = alg_test_null, | ||
2674 | }, { | ||
2675 | .alg = "cryptd(__driver-ecb-aes-aesni)", | ||
2676 | .test = alg_test_null, | ||
2677 | .fips_allowed = 1, | ||
2678 | }, { | ||
2679 | .alg = "cryptd(__driver-ecb-camellia-aesni)", | ||
2680 | .test = alg_test_null, | ||
2681 | }, { | ||
2682 | .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)", | ||
2683 | .test = alg_test_null, | ||
2684 | }, { | ||
2685 | .alg = "cryptd(__driver-ecb-cast5-avx)", | ||
2686 | .test = alg_test_null, | ||
2687 | }, { | ||
2688 | .alg = "cryptd(__driver-ecb-cast6-avx)", | ||
2689 | .test = alg_test_null, | ||
2690 | }, { | ||
2691 | .alg = "cryptd(__driver-ecb-serpent-avx)", | ||
2692 | .test = alg_test_null, | ||
2693 | }, { | ||
2694 | .alg = "cryptd(__driver-ecb-serpent-avx2)", | ||
2695 | .test = alg_test_null, | ||
2696 | }, { | ||
2697 | .alg = "cryptd(__driver-ecb-serpent-sse2)", | ||
2698 | .test = alg_test_null, | ||
2699 | }, { | ||
2700 | .alg = "cryptd(__driver-ecb-twofish-avx)", | ||
2701 | .test = alg_test_null, | ||
2702 | }, { | ||
2703 | .alg = "cryptd(__driver-gcm-aes-aesni)", | ||
2704 | .test = alg_test_null, | ||
2705 | .fips_allowed = 1, | ||
2706 | }, { | ||
2707 | .alg = "cryptd(__ghash-pclmulqdqni)", | ||
2708 | .test = alg_test_null, | ||
2709 | .fips_allowed = 1, | ||
2710 | }, { | ||
2711 | .alg = "ctr(aes)", | 2717 | .alg = "ctr(aes)", |
2712 | .test = alg_test_skcipher, | 2718 | .test = alg_test_skcipher, |
2713 | .fips_allowed = 1, | 2719 | .fips_allowed = 1, |
@@ -3034,10 +3040,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3034 | .fips_allowed = 1, | 3040 | .fips_allowed = 1, |
3035 | .test = alg_test_null, | 3041 | .test = alg_test_null, |
3036 | }, { | 3042 | }, { |
3037 | .alg = "ecb(__aes-aesni)", | ||
3038 | .test = alg_test_null, | ||
3039 | .fips_allowed = 1, | ||
3040 | }, { | ||
3041 | .alg = "ecb(aes)", | 3043 | .alg = "ecb(aes)", |
3042 | .test = alg_test_skcipher, | 3044 | .test = alg_test_skcipher, |
3043 | .fips_allowed = 1, | 3045 | .fips_allowed = 1, |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index e64a4ef9d8ca..9b656be7f52f 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1334,36 +1334,50 @@ static struct hash_testvec rmd320_tv_template[] = { | |||
1334 | } | 1334 | } |
1335 | }; | 1335 | }; |
1336 | 1336 | ||
1337 | #define CRCT10DIF_TEST_VECTORS 3 | 1337 | #define CRCT10DIF_TEST_VECTORS ARRAY_SIZE(crct10dif_tv_template) |
1338 | static struct hash_testvec crct10dif_tv_template[] = { | 1338 | static struct hash_testvec crct10dif_tv_template[] = { |
1339 | { | 1339 | { |
1340 | .plaintext = "abc", | 1340 | .plaintext = "abc", |
1341 | .psize = 3, | 1341 | .psize = 3, |
1342 | #ifdef __LITTLE_ENDIAN | 1342 | .digest = (u8 *)(u16 []){ 0x443b }, |
1343 | .digest = "\x3b\x44", | 1343 | }, { |
1344 | #else | 1344 | .plaintext = "1234567890123456789012345678901234567890" |
1345 | .digest = "\x44\x3b", | 1345 | "123456789012345678901234567890123456789", |
1346 | #endif | 1346 | .psize = 79, |
1347 | }, { | 1347 | .digest = (u8 *)(u16 []){ 0x4b70 }, |
1348 | .plaintext = "1234567890123456789012345678901234567890" | 1348 | .np = 2, |
1349 | "123456789012345678901234567890123456789", | 1349 | .tap = { 63, 16 }, |
1350 | .psize = 79, | 1350 | }, { |
1351 | #ifdef __LITTLE_ENDIAN | 1351 | .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" |
1352 | .digest = "\x70\x4b", | 1352 | "ddddddddddddd", |
1353 | #else | 1353 | .psize = 56, |
1354 | .digest = "\x4b\x70", | 1354 | .digest = (u8 *)(u16 []){ 0x9ce3 }, |
1355 | #endif | 1355 | .np = 8, |
1356 | }, { | 1356 | .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, |
1357 | .plaintext = | 1357 | }, { |
1358 | "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", | 1358 | .plaintext = "1234567890123456789012345678901234567890" |
1359 | .psize = 56, | 1359 | "1234567890123456789012345678901234567890" |
1360 | #ifdef __LITTLE_ENDIAN | 1360 | "1234567890123456789012345678901234567890" |
1361 | .digest = "\xe3\x9c", | 1361 | "1234567890123456789012345678901234567890" |
1362 | #else | 1362 | "1234567890123456789012345678901234567890" |
1363 | .digest = "\x9c\xe3", | 1363 | "1234567890123456789012345678901234567890" |
1364 | #endif | 1364 | "1234567890123456789012345678901234567890" |
1365 | .np = 2, | 1365 | "123456789012345678901234567890123456789", |
1366 | .tap = { 28, 28 } | 1366 | .psize = 319, |
1367 | .digest = (u8 *)(u16 []){ 0x44c6 }, | ||
1368 | }, { | ||
1369 | .plaintext = "1234567890123456789012345678901234567890" | ||
1370 | "1234567890123456789012345678901234567890" | ||
1371 | "1234567890123456789012345678901234567890" | ||
1372 | "1234567890123456789012345678901234567890" | ||
1373 | "1234567890123456789012345678901234567890" | ||
1374 | "1234567890123456789012345678901234567890" | ||
1375 | "1234567890123456789012345678901234567890" | ||
1376 | "123456789012345678901234567890123456789", | ||
1377 | .psize = 319, | ||
1378 | .digest = (u8 *)(u16 []){ 0x44c6 }, | ||
1379 | .np = 4, | ||
1380 | .tap = { 1, 255, 57, 6 }, | ||
1367 | } | 1381 | } |
1368 | }; | 1382 | }; |
1369 | 1383 | ||
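The .np/.tap fields in the vectors above describe how the chunked variant of the test splits the plaintext across scatterlist entries (for example, the 79-byte vector is now fed as 63 + 16 bytes instead of overlapping chunks). A hedged shash-level sketch of what that update sequence amounts to, assuming the caller has allocated a "crct10dif" shash:

#include <crypto/hash.h>

/* Equivalent incremental computation for the 79-byte vector (digest 0x4b70). */
static int crct10dif_chunked_example(struct crypto_shash *tfm, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	static const u8 buf[] = "1234567890123456789012345678901234567890"
				"123456789012345678901234567890123456789";

	desc->tfm = tfm;
	desc->flags = 0;

	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, buf, 63) ?:	/* tap[0] */
	       crypto_shash_update(desc, buf + 63, 16) ?:	/* tap[1] */
	       crypto_shash_final(desc, digest);
}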
diff --git a/crypto/xts.c b/crypto/xts.c index 305343f22a02..410a2e299085 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -13,7 +13,8 @@ | |||
13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
14 | * any later version. | 14 | * any later version. |
15 | */ | 15 | */ |
16 | #include <crypto/algapi.h> | 16 | #include <crypto/internal/skcipher.h> |
17 | #include <crypto/scatterwalk.h> | ||
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
@@ -25,140 +26,320 @@ | |||
25 | #include <crypto/b128ops.h> | 26 | #include <crypto/b128ops.h> |
26 | #include <crypto/gf128mul.h> | 27 | #include <crypto/gf128mul.h> |
27 | 28 | ||
29 | #define XTS_BUFFER_SIZE 128u | ||
30 | |||
28 | struct priv { | 31 | struct priv { |
29 | struct crypto_cipher *child; | 32 | struct crypto_skcipher *child; |
30 | struct crypto_cipher *tweak; | 33 | struct crypto_cipher *tweak; |
31 | }; | 34 | }; |
32 | 35 | ||
33 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 36 | struct xts_instance_ctx { |
37 | struct crypto_skcipher_spawn spawn; | ||
38 | char name[CRYPTO_MAX_ALG_NAME]; | ||
39 | }; | ||
40 | |||
41 | struct rctx { | ||
42 | be128 buf[XTS_BUFFER_SIZE / sizeof(be128)]; | ||
43 | |||
44 | be128 t; | ||
45 | |||
46 | be128 *ext; | ||
47 | |||
48 | struct scatterlist srcbuf[2]; | ||
49 | struct scatterlist dstbuf[2]; | ||
50 | struct scatterlist *src; | ||
51 | struct scatterlist *dst; | ||
52 | |||
53 | unsigned int left; | ||
54 | |||
55 | struct skcipher_request subreq; | ||
56 | }; | ||
57 | |||
58 | static int setkey(struct crypto_skcipher *parent, const u8 *key, | ||
34 | unsigned int keylen) | 59 | unsigned int keylen) |
35 | { | 60 | { |
36 | struct priv *ctx = crypto_tfm_ctx(parent); | 61 | struct priv *ctx = crypto_skcipher_ctx(parent); |
37 | struct crypto_cipher *child = ctx->tweak; | 62 | struct crypto_skcipher *child; |
63 | struct crypto_cipher *tweak; | ||
38 | int err; | 64 | int err; |
39 | 65 | ||
40 | err = xts_check_key(parent, key, keylen); | 66 | err = xts_verify_key(parent, key, keylen); |
41 | if (err) | 67 | if (err) |
42 | return err; | 68 | return err; |
43 | 69 | ||
70 | keylen /= 2; | ||
71 | |||
44 | /* we need two cipher instances: one to compute the initial 'tweak' | 72 | /* we need two cipher instances: one to compute the initial 'tweak' |
45 | * by encrypting the IV (usually the 'plain' iv) and the other | 73 | * by encrypting the IV (usually the 'plain' iv) and the other |
46 | * one to encrypt and decrypt the data */ | 74 | * one to encrypt and decrypt the data */ |
47 | 75 | ||
48 | /* tweak cipher, uses Key2 i.e. the second half of *key */ | 76 | /* tweak cipher, uses Key2 i.e. the second half of *key */ |
49 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 77 | tweak = ctx->tweak; |
50 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 78 | crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK); |
79 | crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) & | ||
51 | CRYPTO_TFM_REQ_MASK); | 80 | CRYPTO_TFM_REQ_MASK); |
52 | err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); | 81 | err = crypto_cipher_setkey(tweak, key + keylen, keylen); |
82 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) & | ||
83 | CRYPTO_TFM_RES_MASK); | ||
53 | if (err) | 84 | if (err) |
54 | return err; | 85 | return err; |
55 | 86 | ||
56 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 87 | /* data cipher, uses Key1 i.e. the first half of *key */ |
57 | CRYPTO_TFM_RES_MASK); | ||
58 | |||
59 | child = ctx->child; | 88 | child = ctx->child; |
89 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
90 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & | ||
91 | CRYPTO_TFM_REQ_MASK); | ||
92 | err = crypto_skcipher_setkey(child, key, keylen); | ||
93 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | ||
94 | CRYPTO_TFM_RES_MASK); | ||
60 | 95 | ||
61 | /* data cipher, uses Key1 i.e. the first half of *key */ | 96 | return err; |
62 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 97 | } |
63 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | ||
64 | CRYPTO_TFM_REQ_MASK); | ||
65 | err = crypto_cipher_setkey(child, key, keylen/2); | ||
66 | if (err) | ||
67 | return err; | ||
68 | 98 | ||
69 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 99 | static int post_crypt(struct skcipher_request *req) |
70 | CRYPTO_TFM_RES_MASK); | 100 | { |
101 | struct rctx *rctx = skcipher_request_ctx(req); | ||
102 | be128 *buf = rctx->ext ?: rctx->buf; | ||
103 | struct skcipher_request *subreq; | ||
104 | const int bs = XTS_BLOCK_SIZE; | ||
105 | struct skcipher_walk w; | ||
106 | struct scatterlist *sg; | ||
107 | unsigned offset; | ||
108 | int err; | ||
71 | 109 | ||
72 | return 0; | 110 | subreq = &rctx->subreq; |
73 | } | 111 | err = skcipher_walk_virt(&w, subreq, false); |
74 | 112 | ||
75 | struct sinfo { | 113 | while (w.nbytes) { |
76 | be128 *t; | 114 | unsigned int avail = w.nbytes; |
77 | struct crypto_tfm *tfm; | 115 | be128 *wdst; |
78 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | ||
79 | }; | ||
80 | 116 | ||
81 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) | 117 | wdst = w.dst.virt.addr; |
82 | { | 118 | |
83 | be128_xor(dst, s->t, src); /* PP <- T xor P */ | 119 | do { |
84 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ | 120 | be128_xor(wdst, buf++, wdst); |
85 | be128_xor(dst, dst, s->t); /* C <- T xor CC */ | 121 | wdst++; |
122 | } while ((avail -= bs) >= bs); | ||
123 | |||
124 | err = skcipher_walk_done(&w, avail); | ||
125 | } | ||
126 | |||
127 | rctx->left -= subreq->cryptlen; | ||
128 | |||
129 | if (err || !rctx->left) | ||
130 | goto out; | ||
131 | |||
132 | rctx->dst = rctx->dstbuf; | ||
133 | |||
134 | scatterwalk_done(&w.out, 0, 1); | ||
135 | sg = w.out.sg; | ||
136 | offset = w.out.offset; | ||
137 | |||
138 | if (rctx->dst != sg) { | ||
139 | rctx->dst[0] = *sg; | ||
140 | sg_unmark_end(rctx->dst); | ||
141 | scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); | ||
142 | } | ||
143 | rctx->dst[0].length -= offset - sg->offset; | ||
144 | rctx->dst[0].offset = offset; | ||
145 | |||
146 | out: | ||
147 | return err; | ||
86 | } | 148 | } |
87 | 149 | ||
88 | static int crypt(struct blkcipher_desc *d, | 150 | static int pre_crypt(struct skcipher_request *req) |
89 | struct blkcipher_walk *w, struct priv *ctx, | ||
90 | void (*tw)(struct crypto_tfm *, u8 *, const u8 *), | ||
91 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) | ||
92 | { | 151 | { |
93 | int err; | 152 | struct rctx *rctx = skcipher_request_ctx(req); |
94 | unsigned int avail; | 153 | be128 *buf = rctx->ext ?: rctx->buf; |
154 | struct skcipher_request *subreq; | ||
95 | const int bs = XTS_BLOCK_SIZE; | 155 | const int bs = XTS_BLOCK_SIZE; |
96 | struct sinfo s = { | 156 | struct skcipher_walk w; |
97 | .tfm = crypto_cipher_tfm(ctx->child), | 157 | struct scatterlist *sg; |
98 | .fn = fn | 158 | unsigned cryptlen; |
99 | }; | 159 | unsigned offset; |
100 | u8 *wsrc; | 160 | bool more; |
101 | u8 *wdst; | 161 | int err; |
102 | |||
103 | err = blkcipher_walk_virt(d, w); | ||
104 | if (!w->nbytes) | ||
105 | return err; | ||
106 | 162 | ||
107 | s.t = (be128 *)w->iv; | 163 | subreq = &rctx->subreq; |
108 | avail = w->nbytes; | 164 | cryptlen = subreq->cryptlen; |
109 | 165 | ||
110 | wsrc = w->src.virt.addr; | 166 | more = rctx->left > cryptlen; |
111 | wdst = w->dst.virt.addr; | 167 | if (!more) |
168 | cryptlen = rctx->left; | ||
112 | 169 | ||
113 | /* calculate first value of T */ | 170 | skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
114 | tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv); | 171 | cryptlen, NULL); |
115 | 172 | ||
116 | goto first; | 173 | err = skcipher_walk_virt(&w, subreq, false); |
117 | 174 | ||
118 | for (;;) { | 175 | while (w.nbytes) { |
119 | do { | 176 | unsigned int avail = w.nbytes; |
120 | gf128mul_x_ble(s.t, s.t); | 177 | be128 *wsrc; |
178 | be128 *wdst; | ||
121 | 179 | ||
122 | first: | 180 | wsrc = w.src.virt.addr; |
123 | xts_round(&s, wdst, wsrc); | 181 | wdst = w.dst.virt.addr; |
124 | 182 | ||
125 | wsrc += bs; | 183 | do { |
126 | wdst += bs; | 184 | *buf++ = rctx->t; |
185 | be128_xor(wdst++, &rctx->t, wsrc++); | ||
186 | gf128mul_x_ble(&rctx->t, &rctx->t); | ||
127 | } while ((avail -= bs) >= bs); | 187 | } while ((avail -= bs) >= bs); |
128 | 188 | ||
129 | err = blkcipher_walk_done(d, w, avail); | 189 | err = skcipher_walk_done(&w, avail); |
130 | if (!w->nbytes) | 190 | } |
131 | break; | 191 | |
192 | skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, | ||
193 | cryptlen, NULL); | ||
132 | 194 | ||
133 | avail = w->nbytes; | 195 | if (err || !more) |
196 | goto out; | ||
134 | 197 | ||
135 | wsrc = w->src.virt.addr; | 198 | rctx->src = rctx->srcbuf; |
136 | wdst = w->dst.virt.addr; | 199 | |
200 | scatterwalk_done(&w.in, 0, 1); | ||
201 | sg = w.in.sg; | ||
202 | offset = w.in.offset; | ||
203 | |||
204 | if (rctx->src != sg) { | ||
205 | rctx->src[0] = *sg; | ||
206 | sg_unmark_end(rctx->src); | ||
207 | scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); | ||
137 | } | 208 | } |
209 | rctx->src[0].length -= offset - sg->offset; | ||
210 | rctx->src[0].offset = offset; | ||
138 | 211 | ||
212 | out: | ||
139 | return err; | 213 | return err; |
140 | } | 214 | } |
141 | 215 | ||
142 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 216 | static int init_crypt(struct skcipher_request *req, crypto_completion_t done) |
143 | struct scatterlist *src, unsigned int nbytes) | ||
144 | { | 217 | { |
145 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 218 | struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); |
146 | struct blkcipher_walk w; | 219 | struct rctx *rctx = skcipher_request_ctx(req); |
220 | struct skcipher_request *subreq; | ||
221 | gfp_t gfp; | ||
222 | |||
223 | subreq = &rctx->subreq; | ||
224 | skcipher_request_set_tfm(subreq, ctx->child); | ||
225 | skcipher_request_set_callback(subreq, req->base.flags, done, req); | ||
226 | |||
227 | gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
228 | GFP_ATOMIC; | ||
229 | rctx->ext = NULL; | ||
230 | |||
231 | subreq->cryptlen = XTS_BUFFER_SIZE; | ||
232 | if (req->cryptlen > XTS_BUFFER_SIZE) { | ||
233 | subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); | ||
234 | rctx->ext = kmalloc(subreq->cryptlen, gfp); | ||
235 | } | ||
236 | |||
237 | rctx->src = req->src; | ||
238 | rctx->dst = req->dst; | ||
239 | rctx->left = req->cryptlen; | ||
147 | 240 | ||
148 | blkcipher_walk_init(&w, dst, src, nbytes); | 241 | /* calculate first value of T */ |
149 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 242 | crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); |
150 | crypto_cipher_alg(ctx->child)->cia_encrypt); | 243 | |
244 | return 0; | ||
151 | } | 245 | } |
152 | 246 | ||
153 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 247 | static void exit_crypt(struct skcipher_request *req) |
154 | struct scatterlist *src, unsigned int nbytes) | ||
155 | { | 248 | { |
156 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 249 | struct rctx *rctx = skcipher_request_ctx(req); |
157 | struct blkcipher_walk w; | 250 | |
251 | rctx->left = 0; | ||
158 | 252 | ||
159 | blkcipher_walk_init(&w, dst, src, nbytes); | 253 | if (rctx->ext) |
160 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 254 | kzfree(rctx->ext); |
161 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 255 | } |
256 | |||
257 | static int do_encrypt(struct skcipher_request *req, int err) | ||
258 | { | ||
259 | struct rctx *rctx = skcipher_request_ctx(req); | ||
260 | struct skcipher_request *subreq; | ||
261 | |||
262 | subreq = &rctx->subreq; | ||
263 | |||
264 | while (!err && rctx->left) { | ||
265 | err = pre_crypt(req) ?: | ||
266 | crypto_skcipher_encrypt(subreq) ?: | ||
267 | post_crypt(req); | ||
268 | |||
269 | if (err == -EINPROGRESS || | ||
270 | (err == -EBUSY && | ||
271 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
272 | return err; | ||
273 | } | ||
274 | |||
275 | exit_crypt(req); | ||
276 | return err; | ||
277 | } | ||
278 | |||
279 | static void encrypt_done(struct crypto_async_request *areq, int err) | ||
280 | { | ||
281 | struct skcipher_request *req = areq->data; | ||
282 | struct skcipher_request *subreq; | ||
283 | struct rctx *rctx; | ||
284 | |||
285 | rctx = skcipher_request_ctx(req); | ||
286 | subreq = &rctx->subreq; | ||
287 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
288 | |||
289 | err = do_encrypt(req, err ?: post_crypt(req)); | ||
290 | if (rctx->left) | ||
291 | return; | ||
292 | |||
293 | skcipher_request_complete(req, err); | ||
294 | } | ||
295 | |||
296 | static int encrypt(struct skcipher_request *req) | ||
297 | { | ||
298 | return do_encrypt(req, init_crypt(req, encrypt_done)); | ||
299 | } | ||
300 | |||
301 | static int do_decrypt(struct skcipher_request *req, int err) | ||
302 | { | ||
303 | struct rctx *rctx = skcipher_request_ctx(req); | ||
304 | struct skcipher_request *subreq; | ||
305 | |||
306 | subreq = &rctx->subreq; | ||
307 | |||
308 | while (!err && rctx->left) { | ||
309 | err = pre_crypt(req) ?: | ||
310 | crypto_skcipher_decrypt(subreq) ?: | ||
311 | post_crypt(req); | ||
312 | |||
313 | if (err == -EINPROGRESS || | ||
314 | (err == -EBUSY && | ||
315 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
316 | return err; | ||
317 | } | ||
318 | |||
319 | exit_crypt(req); | ||
320 | return err; | ||
321 | } | ||
322 | |||
323 | static void decrypt_done(struct crypto_async_request *areq, int err) | ||
324 | { | ||
325 | struct skcipher_request *req = areq->data; | ||
326 | struct skcipher_request *subreq; | ||
327 | struct rctx *rctx; | ||
328 | |||
329 | rctx = skcipher_request_ctx(req); | ||
330 | subreq = &rctx->subreq; | ||
331 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
332 | |||
333 | err = do_decrypt(req, err ?: post_crypt(req)); | ||
334 | if (rctx->left) | ||
335 | return; | ||
336 | |||
337 | skcipher_request_complete(req, err); | ||
338 | } | ||
339 | |||
340 | static int decrypt(struct skcipher_request *req) | ||
341 | { | ||
342 | return do_decrypt(req, init_crypt(req, decrypt_done)); | ||
162 | } | 343 | } |
163 | 344 | ||
164 | int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, | 345 | int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
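The rewritten template still computes standard XTS per 16-byte block; only the scheduling changes: pre_crypt() saves each tweak and applies the first XOR, the inner ecb request performs the block encryption, and post_crypt() applies the second XOR with the saved tweak. As a hedged restatement, using the same primitives as the synchronous xts_round() path being removed here:

#include <linux/crypto.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

/* What one XTS block amounts to: C = E_K1(P xor T) xor T, with the tweak
 * advanced by multiplication by x in GF(2^128) between blocks. */
static void xts_one_block(struct crypto_cipher *data_cipher,
			  be128 *t, be128 *dst, const be128 *src)
{
	be128_xor(dst, t, src);					/* PP = P xor T  */
	crypto_cipher_encrypt_one(data_cipher, (u8 *)dst, (u8 *)dst); /* CC */
	be128_xor(dst, dst, t);					/* C = CC xor T  */
	gf128mul_x_ble(t, t);					/* next tweak    */
}

In the new code the initial tweak T_0 is still produced by encrypting the IV with the second half of the key (see init_crypt() above), while the data encryption is delegated to an "ecb(...)" skcipher so that hardware or SIMD implementations can be used.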
@@ -233,112 +414,168 @@ first: | |||
233 | } | 414 | } |
234 | EXPORT_SYMBOL_GPL(xts_crypt); | 415 | EXPORT_SYMBOL_GPL(xts_crypt); |
235 | 416 | ||
236 | static int init_tfm(struct crypto_tfm *tfm) | 417 | static int init_tfm(struct crypto_skcipher *tfm) |
237 | { | 418 | { |
238 | struct crypto_cipher *cipher; | 419 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
239 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 420 | struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); |
240 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 421 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
241 | struct priv *ctx = crypto_tfm_ctx(tfm); | 422 | struct crypto_skcipher *child; |
242 | u32 *flags = &tfm->crt_flags; | 423 | struct crypto_cipher *tweak; |
243 | |||
244 | cipher = crypto_spawn_cipher(spawn); | ||
245 | if (IS_ERR(cipher)) | ||
246 | return PTR_ERR(cipher); | ||
247 | |||
248 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | ||
249 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
250 | crypto_free_cipher(cipher); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | 424 | ||
254 | ctx->child = cipher; | 425 | child = crypto_spawn_skcipher(&ictx->spawn); |
426 | if (IS_ERR(child)) | ||
427 | return PTR_ERR(child); | ||
255 | 428 | ||
256 | cipher = crypto_spawn_cipher(spawn); | 429 | ctx->child = child; |
257 | if (IS_ERR(cipher)) { | ||
258 | crypto_free_cipher(ctx->child); | ||
259 | return PTR_ERR(cipher); | ||
260 | } | ||
261 | 430 | ||
262 | /* this check isn't really needed, leave it here just in case */ | 431 | tweak = crypto_alloc_cipher(ictx->name, 0, 0); |
263 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | 432 | if (IS_ERR(tweak)) { |
264 | crypto_free_cipher(cipher); | 433 | crypto_free_skcipher(ctx->child); |
265 | crypto_free_cipher(ctx->child); | 434 | return PTR_ERR(tweak); |
266 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
267 | return -EINVAL; | ||
268 | } | 435 | } |
269 | 436 | ||
270 | ctx->tweak = cipher; | 437 | ctx->tweak = tweak; |
438 | |||
439 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + | ||
440 | sizeof(struct rctx)); | ||
271 | 441 | ||
272 | return 0; | 442 | return 0; |
273 | } | 443 | } |
274 | 444 | ||
275 | static void exit_tfm(struct crypto_tfm *tfm) | 445 | static void exit_tfm(struct crypto_skcipher *tfm) |
276 | { | 446 | { |
277 | struct priv *ctx = crypto_tfm_ctx(tfm); | 447 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
278 | crypto_free_cipher(ctx->child); | 448 | |
449 | crypto_free_skcipher(ctx->child); | ||
279 | crypto_free_cipher(ctx->tweak); | 450 | crypto_free_cipher(ctx->tweak); |
280 | } | 451 | } |
281 | 452 | ||
282 | static struct crypto_instance *alloc(struct rtattr **tb) | 453 | static void free(struct skcipher_instance *inst) |
454 | { | ||
455 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
456 | kfree(inst); | ||
457 | } | ||
458 | |||
459 | static int create(struct crypto_template *tmpl, struct rtattr **tb) | ||
283 | { | 460 | { |
284 | struct crypto_instance *inst; | 461 | struct skcipher_instance *inst; |
285 | struct crypto_alg *alg; | 462 | struct crypto_attr_type *algt; |
463 | struct xts_instance_ctx *ctx; | ||
464 | struct skcipher_alg *alg; | ||
465 | const char *cipher_name; | ||
286 | int err; | 466 | int err; |
287 | 467 | ||
288 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 468 | algt = crypto_get_attr_type(tb); |
469 | if (IS_ERR(algt)) | ||
470 | return PTR_ERR(algt); | ||
471 | |||
472 | if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) | ||
473 | return -EINVAL; | ||
474 | |||
475 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
476 | if (IS_ERR(cipher_name)) | ||
477 | return PTR_ERR(cipher_name); | ||
478 | |||
479 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
480 | if (!inst) | ||
481 | return -ENOMEM; | ||
482 | |||
483 | ctx = skcipher_instance_ctx(inst); | ||
484 | |||
485 | crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); | ||
486 | err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, | ||
487 | crypto_requires_sync(algt->type, | ||
488 | algt->mask)); | ||
489 | if (err == -ENOENT) { | ||
490 | err = -ENAMETOOLONG; | ||
491 | if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", | ||
492 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | ||
493 | goto err_free_inst; | ||
494 | |||
495 | err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, | ||
496 | crypto_requires_sync(algt->type, | ||
497 | algt->mask)); | ||
498 | } | ||
499 | |||
289 | if (err) | 500 | if (err) |
290 | return ERR_PTR(err); | 501 | goto err_free_inst; |
291 | 502 | ||
292 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 503 | alg = crypto_skcipher_spawn_alg(&ctx->spawn); |
293 | CRYPTO_ALG_TYPE_MASK); | ||
294 | if (IS_ERR(alg)) | ||
295 | return ERR_CAST(alg); | ||
296 | 504 | ||
297 | inst = crypto_alloc_instance("xts", alg); | 505 | err = -EINVAL; |
298 | if (IS_ERR(inst)) | 506 | if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) |
299 | goto out_put_alg; | 507 | goto err_drop_spawn; |
300 | 508 | ||
301 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 509 | if (crypto_skcipher_alg_ivsize(alg)) |
302 | inst->alg.cra_priority = alg->cra_priority; | 510 | goto err_drop_spawn; |
303 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
304 | 511 | ||
305 | if (alg->cra_alignmask < 7) | 512 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", |
306 | inst->alg.cra_alignmask = 7; | 513 | &alg->base); |
307 | else | 514 | if (err) |
308 | inst->alg.cra_alignmask = alg->cra_alignmask; | 515 | goto err_drop_spawn; |
309 | 516 | ||
310 | inst->alg.cra_type = &crypto_blkcipher_type; | 517 | err = -EINVAL; |
518 | cipher_name = alg->base.cra_name; | ||
311 | 519 | ||
312 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 520 | /* Alas we screwed up the naming so we have to mangle the |
313 | inst->alg.cra_blkcipher.min_keysize = | 521 | * cipher name. |
314 | 2 * alg->cra_cipher.cia_min_keysize; | 522 | */ |
315 | inst->alg.cra_blkcipher.max_keysize = | 523 | if (!strncmp(cipher_name, "ecb(", 4)) { |
316 | 2 * alg->cra_cipher.cia_max_keysize; | 524 | unsigned len; |
317 | 525 | ||
318 | inst->alg.cra_ctxsize = sizeof(struct priv); | 526 | len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); |
527 | if (len < 2 || len >= sizeof(ctx->name)) | ||
528 | goto err_drop_spawn; | ||
319 | 529 | ||
320 | inst->alg.cra_init = init_tfm; | 530 | if (ctx->name[len - 1] != ')') |
321 | inst->alg.cra_exit = exit_tfm; | 531 | goto err_drop_spawn; |
322 | 532 | ||
323 | inst->alg.cra_blkcipher.setkey = setkey; | 533 | ctx->name[len - 1] = 0; |
324 | inst->alg.cra_blkcipher.encrypt = encrypt; | ||
325 | inst->alg.cra_blkcipher.decrypt = decrypt; | ||
326 | 534 | ||
327 | out_put_alg: | 535 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
328 | crypto_mod_put(alg); | 536 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) |
329 | return inst; | 537 | return -ENAMETOOLONG; |
330 | } | 538 | } else |
539 | goto err_drop_spawn; | ||
331 | 540 | ||
332 | static void free(struct crypto_instance *inst) | 541 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
333 | { | 542 | inst->alg.base.cra_priority = alg->base.cra_priority; |
334 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 543 | inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; |
544 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | ||
545 | (__alignof__(u64) - 1); | ||
546 | |||
547 | inst->alg.ivsize = XTS_BLOCK_SIZE; | ||
548 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; | ||
549 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; | ||
550 | |||
551 | inst->alg.base.cra_ctxsize = sizeof(struct priv); | ||
552 | |||
553 | inst->alg.init = init_tfm; | ||
554 | inst->alg.exit = exit_tfm; | ||
555 | |||
556 | inst->alg.setkey = setkey; | ||
557 | inst->alg.encrypt = encrypt; | ||
558 | inst->alg.decrypt = decrypt; | ||
559 | |||
560 | inst->free = free; | ||
561 | |||
562 | err = skcipher_register_instance(tmpl, inst); | ||
563 | if (err) | ||
564 | goto err_drop_spawn; | ||
565 | |||
566 | out: | ||
567 | return err; | ||
568 | |||
569 | err_drop_spawn: | ||
570 | crypto_drop_skcipher(&ctx->spawn); | ||
571 | err_free_inst: | ||
335 | kfree(inst); | 572 | kfree(inst); |
573 | goto out; | ||
336 | } | 574 | } |
337 | 575 | ||
338 | static struct crypto_template crypto_tmpl = { | 576 | static struct crypto_template crypto_tmpl = { |
339 | .name = "xts", | 577 | .name = "xts", |
340 | .alloc = alloc, | 578 | .create = create, |
341 | .free = free, | ||
342 | .module = THIS_MODULE, | 579 | .module = THIS_MODULE, |
343 | }; | 580 | }; |
344 | 581 | ||
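The hunks above convert the xts template from the legacy blkcipher interface to the skcipher interface: create() now grabs an "ecb(<cipher>)" skcipher spawn for the data path, keeps a plain crypto_cipher for the tweak, and registers the result with skcipher_register_instance(). For orientation only, here is a minimal sketch of how a caller might exercise the resulting "xts(aes)" instance through the standard skcipher API; the function name, the 512-byte sector size and the error handling are illustrative assumptions and are not part of this patch.

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    /* Illustrative only: encrypt one 512-byte sector in place with xts(aes). */
    static int example_xts_encrypt_sector(const u8 *key, unsigned int keylen,
                                          u64 sector, u8 *data)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req = NULL;
            struct scatterlist sg;
            /* Sector number, little-endian, as the 16-byte XTS tweak (IV). */
            __le64 iv[2] = { cpu_to_le64(sector), 0 };
            int err;

            /* Ask for a synchronous implementation to keep the sketch simple. */
            tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* xts takes twice the underlying key size: data key + tweak key. */
            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            sg_init_one(&sg, data, 512);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          NULL, NULL);
            skcipher_request_set_crypt(req, &sg, &sg, 512, iv);

            err = crypto_skcipher_encrypt(req);
    out:
            skcipher_request_free(req);
            crypto_free_skcipher(tfm);
            return err;
    }

Passing CRYPTO_ALG_ASYNC as the mask restricts the lookup to synchronous implementations, so the sketch can treat the return value of crypto_skcipher_encrypt() as final; an asynchronous caller would instead supply a completion callback and handle -EINPROGRESS/-EBUSY, as the encrypt_done()/decrypt_done() paths added above do for the template's own sub-requests.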