Diffstat (limited to 'crypto')
37 files changed, 4332 insertions, 1288 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 4dfdd03e708f..26b5dd0cb564 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -23,11 +23,13 @@ comment "Crypto core or helper" | |||
| 23 | 23 | ||
| 24 | config CRYPTO_FIPS | 24 | config CRYPTO_FIPS |
| 25 | bool "FIPS 200 compliance" | 25 | bool "FIPS 200 compliance" |
| 26 | depends on CRYPTO_ANSI_CPRNG | ||
| 26 | help | 27 | help |
| 27 | This option enables the fips boot option which is | 28 | This option enables the fips boot option which is |
| 28 | required if you want the system to operate in a FIPS 200 | 29 | required if you want the system to operate in a FIPS 200 |
| 29 | certification. You should say no unless you know what | 30 | certification. You should say no unless you know what |
| 30 | this is. | 31 | this is. Note that CRYPTO_ANSI_CPRNG is required if this |
| 32 | option is selected. | ||
| 31 | 33 | ||
| 32 | config CRYPTO_ALGAPI | 34 | config CRYPTO_ALGAPI |
| 33 | tristate | 35 | tristate |
| @@ -156,7 +158,7 @@ config CRYPTO_GCM | |||
| 156 | tristate "GCM/GMAC support" | 158 | tristate "GCM/GMAC support" |
| 157 | select CRYPTO_CTR | 159 | select CRYPTO_CTR |
| 158 | select CRYPTO_AEAD | 160 | select CRYPTO_AEAD |
| 159 | select CRYPTO_GF128MUL | 161 | select CRYPTO_GHASH |
| 160 | help | 162 | help |
| 161 | Support for Galois/Counter Mode (GCM) and Galois Message | 163 | Support for Galois/Counter Mode (GCM) and Galois Message |
| 162 | Authentication Code (GMAC). Required for IPSec. | 164 | Authentication Code (GMAC). Required for IPSec. |
| @@ -267,6 +269,18 @@ config CRYPTO_XCBC | |||
| 267 | http://csrc.nist.gov/encryption/modes/proposedmodes/ | 269 | http://csrc.nist.gov/encryption/modes/proposedmodes/ |
| 268 | xcbc-mac/xcbc-mac-spec.pdf | 270 | xcbc-mac/xcbc-mac-spec.pdf |
| 269 | 271 | ||
| 272 | config CRYPTO_VMAC | ||
| 273 | tristate "VMAC support" | ||
| 274 | depends on EXPERIMENTAL | ||
| 275 | select CRYPTO_HASH | ||
| 276 | select CRYPTO_MANAGER | ||
| 277 | help | ||
| 278 | VMAC is a message authentication algorithm designed for | ||
| 279 | very high speed on 64-bit architectures. | ||
| 280 | |||
| 281 | See also: | ||
| 282 | <http://fastcrypto.org/vmac> | ||
| 283 | |||
| 270 | comment "Digest" | 284 | comment "Digest" |
| 271 | 285 | ||
| 272 | config CRYPTO_CRC32C | 286 | config CRYPTO_CRC32C |
| @@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL | |||
| 289 | gain performance compared with software implementation. | 303 | gain performance compared with software implementation. |
| 290 | Module will be crc32c-intel. | 304 | Module will be crc32c-intel. |
| 291 | 305 | ||
| 306 | config CRYPTO_GHASH | ||
| 307 | tristate "GHASH digest algorithm" | ||
| 308 | select CRYPTO_SHASH | ||
| 309 | select CRYPTO_GF128MUL | ||
| 310 | help | ||
| 311 | GHASH is a message digest algorithm for GCM (Galois/Counter Mode). | ||
| 312 | |||
| 292 | config CRYPTO_MD4 | 313 | config CRYPTO_MD4 |
| 293 | tristate "MD4 digest algorithm" | 314 | tristate "MD4 digest algorithm" |
| 294 | select CRYPTO_HASH | 315 | select CRYPTO_HASH |
| @@ -780,13 +801,14 @@ comment "Random Number Generation" | |||
| 780 | 801 | ||
| 781 | config CRYPTO_ANSI_CPRNG | 802 | config CRYPTO_ANSI_CPRNG |
| 782 | tristate "Pseudo Random Number Generation for Cryptographic modules" | 803 | tristate "Pseudo Random Number Generation for Cryptographic modules" |
| 804 | default m | ||
| 783 | select CRYPTO_AES | 805 | select CRYPTO_AES |
| 784 | select CRYPTO_RNG | 806 | select CRYPTO_RNG |
| 785 | select CRYPTO_FIPS | ||
| 786 | help | 807 | help |
| 787 | This option enables the generic pseudo random number generator | 808 | This option enables the generic pseudo random number generator |
| 788 | for cryptographic modules. Uses the Algorithm specified in | 809 | for cryptographic modules. Uses the Algorithm specified in |
| 789 | ANSI X9.31 A.2.4 | 810 | ANSI X9.31 A.2.4. Note that this option must be enabled if CRYPTO_FIPS |
| 811 | is selected. | ||
| 790 | 812 | ||
| 791 | source "drivers/crypto/Kconfig" | 813 | source "drivers/crypto/Kconfig" |
| 792 | 814 | ||
diff --git a/crypto/Makefile b/crypto/Makefile index 673d9f7c1bda..9e8f61908cb5 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_CRYPTO) += crypto.o | 5 | obj-$(CONFIG_CRYPTO) += crypto.o |
| 6 | crypto-objs := api.o cipher.o digest.o compress.o | 6 | crypto-objs := api.o cipher.o compress.o |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o |
| 9 | 9 | ||
| @@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o | |||
| 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o | 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o |
| 23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o | 23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o |
| 24 | 24 | ||
| 25 | crypto_hash-objs := hash.o | ||
| 26 | crypto_hash-objs += ahash.o | 25 | crypto_hash-objs += ahash.o |
| 27 | crypto_hash-objs += shash.o | 26 | crypto_hash-objs += shash.o |
| 28 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
| @@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o | |||
| 33 | 32 | ||
| 34 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o | 33 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o |
| 35 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o | 34 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o |
| 35 | obj-$(CONFIG_CRYPTO_VMAC) += vmac.o | ||
| 36 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o | 36 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o |
| 37 | obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o | 37 | obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o |
| 38 | obj-$(CONFIG_CRYPTO_MD4) += md4.o | 38 | obj-$(CONFIG_CRYPTO_MD4) += md4.o |
| @@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o | |||
| 83 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | 83 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o |
| 84 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 84 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
| 85 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o | 85 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o |
| 86 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | ||
| 86 | 87 | ||
| 87 | # | 88 | # |
| 88 | # generic algorithms and the async_tx api | 89 | # generic algorithms and the async_tx api |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index e11ce37c7104..f6f08336df5d 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include <crypto/internal/skcipher.h> | 16 | #include <crypto/internal/skcipher.h> |
| 17 | #include <linux/cpumask.h> | ||
| 17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| @@ -25,6 +26,8 @@ | |||
| 25 | 26 | ||
| 26 | #include "internal.h" | 27 | #include "internal.h" |
| 27 | 28 | ||
| 29 | static const char *skcipher_default_geniv __read_mostly; | ||
| 30 | |||
| 28 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, | 31 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, |
| 29 | unsigned int keylen) | 32 | unsigned int keylen) |
| 30 | { | 33 | { |
| @@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type); | |||
| 180 | 183 | ||
| 181 | const char *crypto_default_geniv(const struct crypto_alg *alg) | 184 | const char *crypto_default_geniv(const struct crypto_alg *alg) |
| 182 | { | 185 | { |
| 183 | return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; | 186 | if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == |
| 187 | CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : | ||
| 188 | alg->cra_ablkcipher.ivsize) != | ||
| 189 | alg->cra_blocksize) | ||
| 190 | return "chainiv"; | ||
| 191 | |||
| 192 | return alg->cra_flags & CRYPTO_ALG_ASYNC ? | ||
| 193 | "eseqiv" : skcipher_default_geniv; | ||
| 184 | } | 194 | } |
| 185 | 195 | ||
| 186 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | 196 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) |
| @@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
| 201 | int err; | 211 | int err; |
| 202 | 212 | ||
| 203 | larval = crypto_larval_lookup(alg->cra_driver_name, | 213 | larval = crypto_larval_lookup(alg->cra_driver_name, |
| 214 | (type & ~CRYPTO_ALG_TYPE_MASK) | | ||
| 204 | CRYPTO_ALG_TYPE_GIVCIPHER, | 215 | CRYPTO_ALG_TYPE_GIVCIPHER, |
| 205 | CRYPTO_ALG_TYPE_MASK); | 216 | mask | CRYPTO_ALG_TYPE_MASK); |
| 206 | err = PTR_ERR(larval); | 217 | err = PTR_ERR(larval); |
| 207 | if (IS_ERR(larval)) | 218 | if (IS_ERR(larval)) |
| 208 | goto out; | 219 | goto out; |
| @@ -360,3 +371,17 @@ err: | |||
| 360 | return ERR_PTR(err); | 371 | return ERR_PTR(err); |
| 361 | } | 372 | } |
| 362 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); | 373 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); |
| 374 | |||
| 375 | static int __init skcipher_module_init(void) | ||
| 376 | { | ||
| 377 | skcipher_default_geniv = num_possible_cpus() > 1 ? | ||
| 378 | "eseqiv" : "chainiv"; | ||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 382 | static void skcipher_module_exit(void) | ||
| 383 | { | ||
| 384 | } | ||
| 385 | |||
| 386 | module_init(skcipher_module_init); | ||
| 387 | module_exit(skcipher_module_exit); | ||
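The crypto_default_geniv() change above selects the IV generator from three inputs: the algorithm's IV size versus its block size, whether it is asynchronous, and (via skcipher_default_geniv, set at module init) whether the machine is SMP. A condensed restatement of that decision as a standalone sketch, outside the kernel types:

        /* Sketch of the IV-generator selection logic introduced above; the
         * real function reads ivsize from cra_blkcipher or cra_ablkcipher
         * depending on the algorithm type. */
        static const char *pick_default_geniv(unsigned int ivsize,
                                              unsigned int blocksize,
                                              int is_async, int is_smp)
        {
                /* Modes whose IV size differs from the block size keep the
                 * cheap chained IV generator. */
                if (ivsize != blocksize)
                        return "chainiv";

                /* Async ciphers always get eseqiv; sync ciphers get the
                 * boot-time default: eseqiv on SMP, chainiv on UP. */
                if (is_async)
                        return "eseqiv";

                return is_smp ? "eseqiv" : "chainiv";
        }

The effect is to keep uniprocessor systems on the lighter chainiv path while SMP systems default to eseqiv, which does not chain (and therefore serialise) successive requests.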
diff --git a/crypto/aead.c b/crypto/aead.c index d9aa733db164..0a55da70845e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
| 21 | #include <linux/sched.h> | ||
| 21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 22 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
| 23 | 24 | ||
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index b8b66ec3883b..e78b7ee44a74 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c | |||
| @@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); | |||
| 1174 | ctx->key_enc[6 * i + 11] = t; \ | 1174 | ctx->key_enc[6 * i + 11] = t; \ |
| 1175 | } while (0) | 1175 | } while (0) |
| 1176 | 1176 | ||
| 1177 | #define loop8(i) do { \ | 1177 | #define loop8tophalf(i) do { \ |
| 1178 | t = ror32(t, 8); \ | 1178 | t = ror32(t, 8); \ |
| 1179 | t = ls_box(t) ^ rco_tab[i]; \ | 1179 | t = ls_box(t) ^ rco_tab[i]; \ |
| 1180 | t ^= ctx->key_enc[8 * i]; \ | 1180 | t ^= ctx->key_enc[8 * i]; \ |
| @@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); | |||
| 1185 | ctx->key_enc[8 * i + 10] = t; \ | 1185 | ctx->key_enc[8 * i + 10] = t; \ |
| 1186 | t ^= ctx->key_enc[8 * i + 3]; \ | 1186 | t ^= ctx->key_enc[8 * i + 3]; \ |
| 1187 | ctx->key_enc[8 * i + 11] = t; \ | 1187 | ctx->key_enc[8 * i + 11] = t; \ |
| 1188 | } while (0) | ||
| 1189 | |||
| 1190 | #define loop8(i) do { \ | ||
| 1191 | loop8tophalf(i); \ | ||
| 1188 | t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ | 1192 | t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ |
| 1189 | ctx->key_enc[8 * i + 12] = t; \ | 1193 | ctx->key_enc[8 * i + 12] = t; \ |
| 1190 | t ^= ctx->key_enc[8 * i + 5]; \ | 1194 | t ^= ctx->key_enc[8 * i + 5]; \ |
| @@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, | |||
| 1245 | ctx->key_enc[5] = le32_to_cpu(key[5]); | 1249 | ctx->key_enc[5] = le32_to_cpu(key[5]); |
| 1246 | ctx->key_enc[6] = le32_to_cpu(key[6]); | 1250 | ctx->key_enc[6] = le32_to_cpu(key[6]); |
| 1247 | t = ctx->key_enc[7] = le32_to_cpu(key[7]); | 1251 | t = ctx->key_enc[7] = le32_to_cpu(key[7]); |
| 1248 | for (i = 0; i < 7; ++i) | 1252 | for (i = 0; i < 6; ++i) |
| 1249 | loop8(i); | 1253 | loop8(i); |
| 1254 | loop8tophalf(i); | ||
| 1250 | break; | 1255 | break; |
| 1251 | } | 1256 | } |
| 1252 | 1257 | ||
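The loop-count change above follows from the AES-256 key-schedule arithmetic: the expanded key needs 4*(14+1) = 60 32-bit words, the first 8 come straight from the key, each full loop8() emits 8 more, and loop8tophalf() emits 4. Seven full iterations therefore produce 64 words and write past the end of the 60-word key_enc array, while six full iterations plus one top half give exactly 60. A minimal standalone check of that count (plain C, independent of the kernel code):

        #include <assert.h>

        int main(void)
        {
                const int nk = 8;                  /* AES-256 key words            */
                const int nr = 14;                 /* AES-256 rounds               */
                const int needed = 4 * (nr + 1);   /* expanded key words           */

                const int before = nk + 7 * 8;     /* old code: 7 full loop8()     */
                const int after  = nk + 6 * 8 + 4; /* new: 6 full + loop8tophalf() */

                assert(needed == 60);
                assert(before == 64);              /* 4 words past the array end   */
                assert(after == needed);           /* exactly fills key_enc        */
                return 0;
        }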
diff --git a/crypto/ahash.c b/crypto/ahash.c index f3476374f764..33a4ff45f842 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
| @@ -24,6 +24,19 @@ | |||
| 24 | 24 | ||
| 25 | #include "internal.h" | 25 | #include "internal.h" |
| 26 | 26 | ||
| 27 | struct ahash_request_priv { | ||
| 28 | crypto_completion_t complete; | ||
| 29 | void *data; | ||
| 30 | u8 *result; | ||
| 31 | void *ubuf[] CRYPTO_MINALIGN_ATTR; | ||
| 32 | }; | ||
| 33 | |||
| 34 | static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) | ||
| 35 | { | ||
| 36 | return container_of(crypto_hash_alg_common(hash), struct ahash_alg, | ||
| 37 | halg); | ||
| 38 | } | ||
| 39 | |||
| 27 | static int hash_walk_next(struct crypto_hash_walk *walk) | 40 | static int hash_walk_next(struct crypto_hash_walk *walk) |
| 28 | { | 41 | { |
| 29 | unsigned int alignmask = walk->alignmask; | 42 | unsigned int alignmask = walk->alignmask; |
| @@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, | |||
| 132 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, | 145 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, |
| 133 | unsigned int keylen) | 146 | unsigned int keylen) |
| 134 | { | 147 | { |
| 135 | struct ahash_alg *ahash = crypto_ahash_alg(tfm); | ||
| 136 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 148 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
| 137 | int ret; | 149 | int ret; |
| 138 | u8 *buffer, *alignbuffer; | 150 | u8 *buffer, *alignbuffer; |
| 139 | unsigned long absize; | 151 | unsigned long absize; |
| 140 | 152 | ||
| 141 | absize = keylen + alignmask; | 153 | absize = keylen + alignmask; |
| 142 | buffer = kmalloc(absize, GFP_ATOMIC); | 154 | buffer = kmalloc(absize, GFP_KERNEL); |
| 143 | if (!buffer) | 155 | if (!buffer) |
| 144 | return -ENOMEM; | 156 | return -ENOMEM; |
| 145 | 157 | ||
| 146 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 158 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
| 147 | memcpy(alignbuffer, key, keylen); | 159 | memcpy(alignbuffer, key, keylen); |
| 148 | ret = ahash->setkey(tfm, alignbuffer, keylen); | 160 | ret = tfm->setkey(tfm, alignbuffer, keylen); |
| 149 | memset(alignbuffer, 0, keylen); | 161 | kzfree(buffer); |
| 150 | kfree(buffer); | ||
| 151 | return ret; | 162 | return ret; |
| 152 | } | 163 | } |
| 153 | 164 | ||
| 154 | static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | 165 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
| 155 | unsigned int keylen) | 166 | unsigned int keylen) |
| 156 | { | 167 | { |
| 157 | struct ahash_alg *ahash = crypto_ahash_alg(tfm); | ||
| 158 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 168 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
| 159 | 169 | ||
| 160 | if ((unsigned long)key & alignmask) | 170 | if ((unsigned long)key & alignmask) |
| 161 | return ahash_setkey_unaligned(tfm, key, keylen); | 171 | return ahash_setkey_unaligned(tfm, key, keylen); |
| 162 | 172 | ||
| 163 | return ahash->setkey(tfm, key, keylen); | 173 | return tfm->setkey(tfm, key, keylen); |
| 164 | } | 174 | } |
| 175 | EXPORT_SYMBOL_GPL(crypto_ahash_setkey); | ||
| 165 | 176 | ||
| 166 | static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, | 177 | static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, |
| 167 | unsigned int keylen) | 178 | unsigned int keylen) |
| @@ -169,44 +180,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, | |||
| 169 | return -ENOSYS; | 180 | return -ENOSYS; |
| 170 | } | 181 | } |
| 171 | 182 | ||
| 172 | int crypto_ahash_import(struct ahash_request *req, const u8 *in) | 183 | static inline unsigned int ahash_align_buffer_size(unsigned len, |
| 184 | unsigned long mask) | ||
| 185 | { | ||
| 186 | return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void ahash_op_unaligned_finish(struct ahash_request *req, int err) | ||
| 190 | { | ||
| 191 | struct ahash_request_priv *priv = req->priv; | ||
| 192 | |||
| 193 | if (err == -EINPROGRESS) | ||
| 194 | return; | ||
| 195 | |||
| 196 | if (!err) | ||
| 197 | memcpy(priv->result, req->result, | ||
| 198 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | ||
| 199 | |||
| 200 | kzfree(priv); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) | ||
| 204 | { | ||
| 205 | struct ahash_request *areq = req->data; | ||
| 206 | struct ahash_request_priv *priv = areq->priv; | ||
| 207 | crypto_completion_t complete = priv->complete; | ||
| 208 | void *data = priv->data; | ||
| 209 | |||
| 210 | ahash_op_unaligned_finish(areq, err); | ||
| 211 | |||
| 212 | complete(data, err); | ||
| 213 | } | ||
| 214 | |||
| 215 | static int ahash_op_unaligned(struct ahash_request *req, | ||
| 216 | int (*op)(struct ahash_request *)) | ||
| 173 | { | 217 | { |
| 174 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 218 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 175 | struct ahash_alg *alg = crypto_ahash_alg(tfm); | 219 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
| 220 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
| 221 | struct ahash_request_priv *priv; | ||
| 222 | int err; | ||
| 223 | |||
| 224 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | ||
| 225 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
| 226 | GFP_KERNEL : GFP_ATOMIC); | ||
| 227 | if (!priv) | ||
| 228 | return -ENOMEM; | ||
| 176 | 229 | ||
| 177 | memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm)); | 230 | priv->result = req->result; |
| 231 | priv->complete = req->base.complete; | ||
| 232 | priv->data = req->base.data; | ||
| 178 | 233 | ||
| 179 | if (alg->reinit) | 234 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); |
| 180 | alg->reinit(req); | 235 | req->base.complete = ahash_op_unaligned_done; |
| 236 | req->base.data = req; | ||
| 237 | req->priv = priv; | ||
| 181 | 238 | ||
| 182 | return 0; | 239 | err = op(req); |
| 240 | ahash_op_unaligned_finish(req, err); | ||
| 241 | |||
| 242 | return err; | ||
| 183 | } | 243 | } |
| 184 | EXPORT_SYMBOL_GPL(crypto_ahash_import); | ||
| 185 | 244 | ||
| 186 | static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, | 245 | static int crypto_ahash_op(struct ahash_request *req, |
| 187 | u32 mask) | 246 | int (*op)(struct ahash_request *)) |
| 188 | { | 247 | { |
| 189 | return alg->cra_ctxsize; | 248 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 249 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
| 250 | |||
| 251 | if ((unsigned long)req->result & alignmask) | ||
| 252 | return ahash_op_unaligned(req, op); | ||
| 253 | |||
| 254 | return op(req); | ||
| 190 | } | 255 | } |
| 191 | 256 | ||
| 192 | static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | 257 | int crypto_ahash_final(struct ahash_request *req) |
| 193 | { | 258 | { |
| 194 | struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; | 259 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); |
| 195 | struct ahash_tfm *crt = &tfm->crt_ahash; | 260 | } |
| 261 | EXPORT_SYMBOL_GPL(crypto_ahash_final); | ||
| 196 | 262 | ||
| 197 | if (alg->digestsize > PAGE_SIZE / 8) | 263 | int crypto_ahash_finup(struct ahash_request *req) |
| 198 | return -EINVAL; | 264 | { |
| 265 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); | ||
| 266 | } | ||
| 267 | EXPORT_SYMBOL_GPL(crypto_ahash_finup); | ||
| 268 | |||
| 269 | int crypto_ahash_digest(struct ahash_request *req) | ||
| 270 | { | ||
| 271 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); | ||
| 272 | } | ||
| 273 | EXPORT_SYMBOL_GPL(crypto_ahash_digest); | ||
| 274 | |||
| 275 | static void ahash_def_finup_finish2(struct ahash_request *req, int err) | ||
| 276 | { | ||
| 277 | struct ahash_request_priv *priv = req->priv; | ||
| 278 | |||
| 279 | if (err == -EINPROGRESS) | ||
| 280 | return; | ||
| 281 | |||
| 282 | if (!err) | ||
| 283 | memcpy(priv->result, req->result, | ||
| 284 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | ||
| 199 | 285 | ||
| 200 | crt->init = alg->init; | 286 | kzfree(priv); |
| 201 | crt->update = alg->update; | 287 | } |
| 202 | crt->final = alg->final; | 288 | |
| 203 | crt->digest = alg->digest; | 289 | static void ahash_def_finup_done2(struct crypto_async_request *req, int err) |
| 204 | crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; | 290 | { |
| 205 | crt->digestsize = alg->digestsize; | 291 | struct ahash_request *areq = req->data; |
| 292 | struct ahash_request_priv *priv = areq->priv; | ||
| 293 | crypto_completion_t complete = priv->complete; | ||
| 294 | void *data = priv->data; | ||
| 295 | |||
| 296 | ahash_def_finup_finish2(areq, err); | ||
| 297 | |||
| 298 | complete(data, err); | ||
| 299 | } | ||
| 300 | |||
| 301 | static int ahash_def_finup_finish1(struct ahash_request *req, int err) | ||
| 302 | { | ||
| 303 | if (err) | ||
| 304 | goto out; | ||
| 305 | |||
| 306 | req->base.complete = ahash_def_finup_done2; | ||
| 307 | req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 308 | err = crypto_ahash_reqtfm(req)->final(req); | ||
| 309 | |||
| 310 | out: | ||
| 311 | ahash_def_finup_finish2(req, err); | ||
| 312 | return err; | ||
| 313 | } | ||
| 314 | |||
| 315 | static void ahash_def_finup_done1(struct crypto_async_request *req, int err) | ||
| 316 | { | ||
| 317 | struct ahash_request *areq = req->data; | ||
| 318 | struct ahash_request_priv *priv = areq->priv; | ||
| 319 | crypto_completion_t complete = priv->complete; | ||
| 320 | void *data = priv->data; | ||
| 321 | |||
| 322 | err = ahash_def_finup_finish1(areq, err); | ||
| 323 | |||
| 324 | complete(data, err); | ||
| 325 | } | ||
| 326 | |||
| 327 | static int ahash_def_finup(struct ahash_request *req) | ||
| 328 | { | ||
| 329 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 330 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
| 331 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
| 332 | struct ahash_request_priv *priv; | ||
| 333 | |||
| 334 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | ||
| 335 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
| 336 | GFP_KERNEL : GFP_ATOMIC); | ||
| 337 | if (!priv) | ||
| 338 | return -ENOMEM; | ||
| 339 | |||
| 340 | priv->result = req->result; | ||
| 341 | priv->complete = req->base.complete; | ||
| 342 | priv->data = req->base.data; | ||
| 343 | |||
| 344 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); | ||
| 345 | req->base.complete = ahash_def_finup_done1; | ||
| 346 | req->base.data = req; | ||
| 347 | req->priv = priv; | ||
| 348 | |||
| 349 | return ahash_def_finup_finish1(req, tfm->update(req)); | ||
| 350 | } | ||
| 351 | |||
| 352 | static int ahash_no_export(struct ahash_request *req, void *out) | ||
| 353 | { | ||
| 354 | return -ENOSYS; | ||
| 355 | } | ||
| 356 | |||
| 357 | static int ahash_no_import(struct ahash_request *req, const void *in) | ||
| 358 | { | ||
| 359 | return -ENOSYS; | ||
| 360 | } | ||
| 361 | |||
| 362 | static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) | ||
| 363 | { | ||
| 364 | struct crypto_ahash *hash = __crypto_ahash_cast(tfm); | ||
| 365 | struct ahash_alg *alg = crypto_ahash_alg(hash); | ||
| 366 | |||
| 367 | hash->setkey = ahash_nosetkey; | ||
| 368 | hash->export = ahash_no_export; | ||
| 369 | hash->import = ahash_no_import; | ||
| 370 | |||
| 371 | if (tfm->__crt_alg->cra_type != &crypto_ahash_type) | ||
| 372 | return crypto_init_shash_ops_async(tfm); | ||
| 373 | |||
| 374 | hash->init = alg->init; | ||
| 375 | hash->update = alg->update; | ||
| 376 | hash->final = alg->final; | ||
| 377 | hash->finup = alg->finup ?: ahash_def_finup; | ||
| 378 | hash->digest = alg->digest; | ||
| 379 | |||
| 380 | if (alg->setkey) | ||
| 381 | hash->setkey = alg->setkey; | ||
| 382 | if (alg->export) | ||
| 383 | hash->export = alg->export; | ||
| 384 | if (alg->import) | ||
| 385 | hash->import = alg->import; | ||
| 206 | 386 | ||
| 207 | return 0; | 387 | return 0; |
| 208 | } | 388 | } |
| 209 | 389 | ||
| 390 | static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) | ||
| 391 | { | ||
| 392 | if (alg->cra_type == &crypto_ahash_type) | ||
| 393 | return alg->cra_ctxsize; | ||
| 394 | |||
| 395 | return sizeof(struct crypto_shash *); | ||
| 396 | } | ||
| 397 | |||
| 210 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | 398 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
| 211 | __attribute__ ((unused)); | 399 | __attribute__ ((unused)); |
| 212 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | 400 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
| @@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | |||
| 215 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | 403 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? |
| 216 | "yes" : "no"); | 404 | "yes" : "no"); |
| 217 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 405 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
| 218 | seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize); | 406 | seq_printf(m, "digestsize : %u\n", |
| 407 | __crypto_hash_alg_common(alg)->digestsize); | ||
| 219 | } | 408 | } |
| 220 | 409 | ||
| 221 | const struct crypto_type crypto_ahash_type = { | 410 | const struct crypto_type crypto_ahash_type = { |
| 222 | .ctxsize = crypto_ahash_ctxsize, | 411 | .extsize = crypto_ahash_extsize, |
| 223 | .init = crypto_init_ahash_ops, | 412 | .init_tfm = crypto_ahash_init_tfm, |
| 224 | #ifdef CONFIG_PROC_FS | 413 | #ifdef CONFIG_PROC_FS |
| 225 | .show = crypto_ahash_show, | 414 | .show = crypto_ahash_show, |
| 226 | #endif | 415 | #endif |
| 416 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
| 417 | .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, | ||
| 418 | .type = CRYPTO_ALG_TYPE_AHASH, | ||
| 419 | .tfmsize = offsetof(struct crypto_ahash, base), | ||
| 227 | }; | 420 | }; |
| 228 | EXPORT_SYMBOL_GPL(crypto_ahash_type); | 421 | EXPORT_SYMBOL_GPL(crypto_ahash_type); |
| 229 | 422 | ||
| 423 | struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, | ||
| 424 | u32 mask) | ||
| 425 | { | ||
| 426 | return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); | ||
| 427 | } | ||
| 428 | EXPORT_SYMBOL_GPL(crypto_alloc_ahash); | ||
| 429 | |||
| 430 | static int ahash_prepare_alg(struct ahash_alg *alg) | ||
| 431 | { | ||
| 432 | struct crypto_alg *base = &alg->halg.base; | ||
| 433 | |||
| 434 | if (alg->halg.digestsize > PAGE_SIZE / 8 || | ||
| 435 | alg->halg.statesize > PAGE_SIZE / 8) | ||
| 436 | return -EINVAL; | ||
| 437 | |||
| 438 | base->cra_type = &crypto_ahash_type; | ||
| 439 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
| 440 | base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; | ||
| 441 | |||
| 442 | return 0; | ||
| 443 | } | ||
| 444 | |||
| 445 | int crypto_register_ahash(struct ahash_alg *alg) | ||
| 446 | { | ||
| 447 | struct crypto_alg *base = &alg->halg.base; | ||
| 448 | int err; | ||
| 449 | |||
| 450 | err = ahash_prepare_alg(alg); | ||
| 451 | if (err) | ||
| 452 | return err; | ||
| 453 | |||
| 454 | return crypto_register_alg(base); | ||
| 455 | } | ||
| 456 | EXPORT_SYMBOL_GPL(crypto_register_ahash); | ||
| 457 | |||
| 458 | int crypto_unregister_ahash(struct ahash_alg *alg) | ||
| 459 | { | ||
| 460 | return crypto_unregister_alg(&alg->halg.base); | ||
| 461 | } | ||
| 462 | EXPORT_SYMBOL_GPL(crypto_unregister_ahash); | ||
| 463 | |||
| 464 | int ahash_register_instance(struct crypto_template *tmpl, | ||
| 465 | struct ahash_instance *inst) | ||
| 466 | { | ||
| 467 | int err; | ||
| 468 | |||
| 469 | err = ahash_prepare_alg(&inst->alg); | ||
| 470 | if (err) | ||
| 471 | return err; | ||
| 472 | |||
| 473 | return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); | ||
| 474 | } | ||
| 475 | EXPORT_SYMBOL_GPL(ahash_register_instance); | ||
| 476 | |||
| 477 | void ahash_free_instance(struct crypto_instance *inst) | ||
| 478 | { | ||
| 479 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
| 480 | kfree(ahash_instance(inst)); | ||
| 481 | } | ||
| 482 | EXPORT_SYMBOL_GPL(ahash_free_instance); | ||
| 483 | |||
| 484 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | ||
| 485 | struct hash_alg_common *alg, | ||
| 486 | struct crypto_instance *inst) | ||
| 487 | { | ||
| 488 | return crypto_init_spawn2(&spawn->base, &alg->base, inst, | ||
| 489 | &crypto_ahash_type); | ||
| 490 | } | ||
| 491 | EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); | ||
| 492 | |||
| 493 | struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | ||
| 494 | { | ||
| 495 | struct crypto_alg *alg; | ||
| 496 | |||
| 497 | alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); | ||
| 498 | return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg); | ||
| 499 | } | ||
| 500 | EXPORT_SYMBOL_GPL(ahash_attr_alg); | ||
| 501 | |||
| 230 | MODULE_LICENSE("GPL"); | 502 | MODULE_LICENSE("GPL"); |
| 231 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); | 503 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |
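The ahash rework above turns crypto_ahash into a first-class frontend: crypto_alloc_ahash() allocates the transform, crypto_ahash_init_tfm() fills in the per-tfm setkey/final/finup/digest/export/import pointers, and shash algorithms are wrapped transparently via crypto_init_shash_ops_async(). A minimal caller-side sketch of the resulting API, assuming the usual ahash_request_alloc()/ahash_request_set_crypt()/ahash_request_free() helpers from <crypto/hash.h>, which are not shown in this diff:

        #include <crypto/hash.h>
        #include <linux/scatterlist.h>

        /* Hash one linear buffer with the asynchronous hash API; sketch only.
         * 'out' must hold at least crypto_ahash_digestsize(tfm) bytes. */
        static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
        {
                struct crypto_ahash *tfm;
                struct ahash_request *req;
                struct scatterlist sg;
                int err;

                tfm = crypto_alloc_ahash("sha1", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                req = ahash_request_alloc(tfm, GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
                        goto out_free_tfm;
                }

                sg_init_one(&sg, data, len);
                ahash_request_set_crypt(req, &sg, out, len);

                /* Kept synchronous for simplicity; a real caller would set a
                 * completion callback and handle -EINPROGRESS/-EBUSY. */
                err = crypto_ahash_digest(req);

                ahash_request_free(req);
        out_free_tfm:
                crypto_free_ahash(tfm);
                return err;
        }

Note that crypto_ahash_final(), crypto_ahash_finup() and crypto_ahash_digest() all funnel through crypto_ahash_op(), so a misaligned result buffer is handled transparently by the unaligned-copy path added above.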
diff --git a/crypto/algapi.c b/crypto/algapi.c index df0863d56995..f149b1c8b76d 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
| @@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg) | |||
| 81 | crypto_tmpl_put(tmpl); | 81 | crypto_tmpl_put(tmpl); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static struct list_head *crypto_more_spawns(struct crypto_alg *alg, | ||
| 85 | struct list_head *stack, | ||
| 86 | struct list_head *top, | ||
| 87 | struct list_head *secondary_spawns) | ||
| 88 | { | ||
| 89 | struct crypto_spawn *spawn, *n; | ||
| 90 | |||
| 91 | if (list_empty(stack)) | ||
| 92 | return NULL; | ||
| 93 | |||
| 94 | spawn = list_first_entry(stack, struct crypto_spawn, list); | ||
| 95 | n = list_entry(spawn->list.next, struct crypto_spawn, list); | ||
| 96 | |||
| 97 | if (spawn->alg && &n->list != stack && !n->alg) | ||
| 98 | n->alg = (n->list.next == stack) ? alg : | ||
| 99 | &list_entry(n->list.next, struct crypto_spawn, | ||
| 100 | list)->inst->alg; | ||
| 101 | |||
| 102 | list_move(&spawn->list, secondary_spawns); | ||
| 103 | |||
| 104 | return &n->list == stack ? top : &n->inst->alg.cra_users; | ||
| 105 | } | ||
| 106 | |||
| 84 | static void crypto_remove_spawn(struct crypto_spawn *spawn, | 107 | static void crypto_remove_spawn(struct crypto_spawn *spawn, |
| 85 | struct list_head *list, | 108 | struct list_head *list) |
| 86 | struct list_head *secondary_spawns) | ||
| 87 | { | 109 | { |
| 88 | struct crypto_instance *inst = spawn->inst; | 110 | struct crypto_instance *inst = spawn->inst; |
| 89 | struct crypto_template *tmpl = inst->tmpl; | 111 | struct crypto_template *tmpl = inst->tmpl; |
| 90 | 112 | ||
| 91 | list_del_init(&spawn->list); | ||
| 92 | spawn->alg = NULL; | ||
| 93 | |||
| 94 | if (crypto_is_dead(&inst->alg)) | 113 | if (crypto_is_dead(&inst->alg)) |
| 95 | return; | 114 | return; |
| 96 | 115 | ||
| @@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn, | |||
| 106 | hlist_del(&inst->list); | 125 | hlist_del(&inst->list); |
| 107 | inst->alg.cra_destroy = crypto_destroy_instance; | 126 | inst->alg.cra_destroy = crypto_destroy_instance; |
| 108 | 127 | ||
| 109 | list_splice(&inst->alg.cra_users, secondary_spawns); | 128 | BUG_ON(!list_empty(&inst->alg.cra_users)); |
| 110 | } | 129 | } |
| 111 | 130 | ||
| 112 | static void crypto_remove_spawns(struct list_head *spawns, | 131 | static void crypto_remove_spawns(struct crypto_alg *alg, |
| 113 | struct list_head *list, u32 new_type) | 132 | struct list_head *list, |
| 133 | struct crypto_alg *nalg) | ||
| 114 | { | 134 | { |
| 135 | u32 new_type = (nalg ?: alg)->cra_flags; | ||
| 115 | struct crypto_spawn *spawn, *n; | 136 | struct crypto_spawn *spawn, *n; |
| 116 | LIST_HEAD(secondary_spawns); | 137 | LIST_HEAD(secondary_spawns); |
| 138 | struct list_head *spawns; | ||
| 139 | LIST_HEAD(stack); | ||
| 140 | LIST_HEAD(top); | ||
| 117 | 141 | ||
| 142 | spawns = &alg->cra_users; | ||
| 118 | list_for_each_entry_safe(spawn, n, spawns, list) { | 143 | list_for_each_entry_safe(spawn, n, spawns, list) { |
| 119 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) | 144 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) |
| 120 | continue; | 145 | continue; |
| 121 | 146 | ||
| 122 | crypto_remove_spawn(spawn, list, &secondary_spawns); | 147 | list_move(&spawn->list, &top); |
| 123 | } | 148 | } |
| 124 | 149 | ||
| 125 | while (!list_empty(&secondary_spawns)) { | 150 | spawns = ⊤ |
| 126 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) | 151 | do { |
| 127 | crypto_remove_spawn(spawn, list, &secondary_spawns); | 152 | while (!list_empty(spawns)) { |
| 153 | struct crypto_instance *inst; | ||
| 154 | |||
| 155 | spawn = list_first_entry(spawns, struct crypto_spawn, | ||
| 156 | list); | ||
| 157 | inst = spawn->inst; | ||
| 158 | |||
| 159 | BUG_ON(&inst->alg == alg); | ||
| 160 | |||
| 161 | list_move(&spawn->list, &stack); | ||
| 162 | |||
| 163 | if (&inst->alg == nalg) | ||
| 164 | break; | ||
| 165 | |||
| 166 | spawn->alg = NULL; | ||
| 167 | spawns = &inst->alg.cra_users; | ||
| 168 | } | ||
| 169 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, | ||
| 170 | &secondary_spawns))); | ||
| 171 | |||
| 172 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { | ||
| 173 | if (spawn->alg) | ||
| 174 | list_move(&spawn->list, &spawn->alg->cra_users); | ||
| 175 | else | ||
| 176 | crypto_remove_spawn(spawn, list); | ||
| 128 | } | 177 | } |
| 129 | } | 178 | } |
| 130 | 179 | ||
| @@ -258,7 +307,7 @@ found: | |||
| 258 | q->cra_priority > alg->cra_priority) | 307 | q->cra_priority > alg->cra_priority) |
| 259 | continue; | 308 | continue; |
| 260 | 309 | ||
| 261 | crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags); | 310 | crypto_remove_spawns(q, &list, alg); |
| 262 | } | 311 | } |
| 263 | 312 | ||
| 264 | complete: | 313 | complete: |
| @@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) | |||
| 330 | 379 | ||
| 331 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); | 380 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); |
| 332 | list_del_init(&alg->cra_list); | 381 | list_del_init(&alg->cra_list); |
| 333 | crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); | 382 | crypto_remove_spawns(alg, list, NULL); |
| 334 | 383 | ||
| 335 | return 0; | 384 | return 0; |
| 336 | } | 385 | } |
| @@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | |||
| 488 | } | 537 | } |
| 489 | EXPORT_SYMBOL_GPL(crypto_init_spawn); | 538 | EXPORT_SYMBOL_GPL(crypto_init_spawn); |
| 490 | 539 | ||
| 540 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | ||
| 541 | struct crypto_instance *inst, | ||
| 542 | const struct crypto_type *frontend) | ||
| 543 | { | ||
| 544 | int err = -EINVAL; | ||
| 545 | |||
| 546 | if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) | ||
| 547 | goto out; | ||
| 548 | |||
| 549 | spawn->frontend = frontend; | ||
| 550 | err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); | ||
| 551 | |||
| 552 | out: | ||
| 553 | return err; | ||
| 554 | } | ||
| 555 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); | ||
| 556 | |||
| 491 | void crypto_drop_spawn(struct crypto_spawn *spawn) | 557 | void crypto_drop_spawn(struct crypto_spawn *spawn) |
| 492 | { | 558 | { |
| 559 | if (!spawn->alg) | ||
| 560 | return; | ||
| 561 | |||
| 493 | down_write(&crypto_alg_sem); | 562 | down_write(&crypto_alg_sem); |
| 494 | list_del(&spawn->list); | 563 | list_del(&spawn->list); |
| 495 | up_write(&crypto_alg_sem); | 564 | up_write(&crypto_alg_sem); |
| 496 | } | 565 | } |
| 497 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); | 566 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); |
| 498 | 567 | ||
| 499 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 568 | static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) |
| 500 | u32 mask) | ||
| 501 | { | 569 | { |
| 502 | struct crypto_alg *alg; | 570 | struct crypto_alg *alg; |
| 503 | struct crypto_alg *alg2; | 571 | struct crypto_alg *alg2; |
| 504 | struct crypto_tfm *tfm; | ||
| 505 | 572 | ||
| 506 | down_read(&crypto_alg_sem); | 573 | down_read(&crypto_alg_sem); |
| 507 | alg = spawn->alg; | 574 | alg = spawn->alg; |
| @@ -516,6 +583,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | |||
| 516 | return ERR_PTR(-EAGAIN); | 583 | return ERR_PTR(-EAGAIN); |
| 517 | } | 584 | } |
| 518 | 585 | ||
| 586 | return alg; | ||
| 587 | } | ||
| 588 | |||
| 589 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | ||
| 590 | u32 mask) | ||
| 591 | { | ||
| 592 | struct crypto_alg *alg; | ||
| 593 | struct crypto_tfm *tfm; | ||
| 594 | |||
| 595 | alg = crypto_spawn_alg(spawn); | ||
| 596 | if (IS_ERR(alg)) | ||
| 597 | return ERR_CAST(alg); | ||
| 598 | |||
| 519 | tfm = ERR_PTR(-EINVAL); | 599 | tfm = ERR_PTR(-EINVAL); |
| 520 | if (unlikely((alg->cra_flags ^ type) & mask)) | 600 | if (unlikely((alg->cra_flags ^ type) & mask)) |
| 521 | goto out_put_alg; | 601 | goto out_put_alg; |
| @@ -532,6 +612,27 @@ out_put_alg: | |||
| 532 | } | 612 | } |
| 533 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); | 613 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); |
| 534 | 614 | ||
| 615 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn) | ||
| 616 | { | ||
| 617 | struct crypto_alg *alg; | ||
| 618 | struct crypto_tfm *tfm; | ||
| 619 | |||
| 620 | alg = crypto_spawn_alg(spawn); | ||
| 621 | if (IS_ERR(alg)) | ||
| 622 | return ERR_CAST(alg); | ||
| 623 | |||
| 624 | tfm = crypto_create_tfm(alg, spawn->frontend); | ||
| 625 | if (IS_ERR(tfm)) | ||
| 626 | goto out_put_alg; | ||
| 627 | |||
| 628 | return tfm; | ||
| 629 | |||
| 630 | out_put_alg: | ||
| 631 | crypto_mod_put(alg); | ||
| 632 | return tfm; | ||
| 633 | } | ||
| 634 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); | ||
| 635 | |||
| 535 | int crypto_register_notifier(struct notifier_block *nb) | 636 | int crypto_register_notifier(struct notifier_block *nb) |
| 536 | { | 637 | { |
| 537 | return blocking_notifier_chain_register(&crypto_chain, nb); | 638 | return blocking_notifier_chain_register(&crypto_chain, nb); |
| @@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta) | |||
| 595 | } | 696 | } |
| 596 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); | 697 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); |
| 597 | 698 | ||
| 598 | struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) | 699 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, |
| 700 | const struct crypto_type *frontend, | ||
| 701 | u32 type, u32 mask) | ||
| 599 | { | 702 | { |
| 600 | const char *name; | 703 | const char *name; |
| 601 | int err; | 704 | int err; |
| @@ -605,9 +708,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) | |||
| 605 | if (IS_ERR(name)) | 708 | if (IS_ERR(name)) |
| 606 | return ERR_PTR(err); | 709 | return ERR_PTR(err); |
| 607 | 710 | ||
| 608 | return crypto_alg_mod_lookup(name, type, mask); | 711 | return crypto_find_alg(name, frontend, type, mask); |
| 609 | } | 712 | } |
| 610 | EXPORT_SYMBOL_GPL(crypto_attr_alg); | 713 | EXPORT_SYMBOL_GPL(crypto_attr_alg2); |
| 611 | 714 | ||
| 612 | int crypto_attr_u32(struct rtattr *rta, u32 *num) | 715 | int crypto_attr_u32(struct rtattr *rta, u32 *num) |
| 613 | { | 716 | { |
| @@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num) | |||
| 627 | } | 730 | } |
| 628 | EXPORT_SYMBOL_GPL(crypto_attr_u32); | 731 | EXPORT_SYMBOL_GPL(crypto_attr_u32); |
| 629 | 732 | ||
| 630 | struct crypto_instance *crypto_alloc_instance(const char *name, | 733 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, |
| 631 | struct crypto_alg *alg) | 734 | unsigned int head) |
| 632 | { | 735 | { |
| 633 | struct crypto_instance *inst; | 736 | struct crypto_instance *inst; |
| 634 | struct crypto_spawn *spawn; | 737 | char *p; |
| 635 | int err; | 738 | int err; |
| 636 | 739 | ||
| 637 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | 740 | p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), |
| 638 | if (!inst) | 741 | GFP_KERNEL); |
| 742 | if (!p) | ||
| 639 | return ERR_PTR(-ENOMEM); | 743 | return ERR_PTR(-ENOMEM); |
| 640 | 744 | ||
| 745 | inst = (void *)(p + head); | ||
| 746 | |||
| 641 | err = -ENAMETOOLONG; | 747 | err = -ENAMETOOLONG; |
| 642 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, | 748 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, |
| 643 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) | 749 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) |
| @@ -647,6 +753,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
| 647 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 753 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 648 | goto err_free_inst; | 754 | goto err_free_inst; |
| 649 | 755 | ||
| 756 | return p; | ||
| 757 | |||
| 758 | err_free_inst: | ||
| 759 | kfree(p); | ||
| 760 | return ERR_PTR(err); | ||
| 761 | } | ||
| 762 | EXPORT_SYMBOL_GPL(crypto_alloc_instance2); | ||
| 763 | |||
| 764 | struct crypto_instance *crypto_alloc_instance(const char *name, | ||
| 765 | struct crypto_alg *alg) | ||
| 766 | { | ||
| 767 | struct crypto_instance *inst; | ||
| 768 | struct crypto_spawn *spawn; | ||
| 769 | int err; | ||
| 770 | |||
| 771 | inst = crypto_alloc_instance2(name, alg, 0); | ||
| 772 | if (IS_ERR(inst)) | ||
| 773 | goto out; | ||
| 774 | |||
| 650 | spawn = crypto_instance_ctx(inst); | 775 | spawn = crypto_instance_ctx(inst); |
| 651 | err = crypto_init_spawn(spawn, alg, inst, | 776 | err = crypto_init_spawn(spawn, alg, inst, |
| 652 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 777 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
| @@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
| 658 | 783 | ||
| 659 | err_free_inst: | 784 | err_free_inst: |
| 660 | kfree(inst); | 785 | kfree(inst); |
| 661 | return ERR_PTR(err); | 786 | inst = ERR_PTR(err); |
| 787 | |||
| 788 | out: | ||
| 789 | return inst; | ||
| 662 | } | 790 | } |
| 663 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); | 791 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); |
| 664 | 792 | ||
diff --git a/crypto/algboss.c b/crypto/algboss.c index 9908dd830c26..412241ce4cfa 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
| @@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data) | |||
| 68 | goto err; | 68 | goto err; |
| 69 | 69 | ||
| 70 | do { | 70 | do { |
| 71 | if (tmpl->create) { | ||
| 72 | err = tmpl->create(tmpl, param->tb); | ||
| 73 | continue; | ||
| 74 | } | ||
| 75 | |||
| 71 | inst = tmpl->alloc(param->tb); | 76 | inst = tmpl->alloc(param->tb); |
| 72 | if (IS_ERR(inst)) | 77 | if (IS_ERR(inst)) |
| 73 | err = PTR_ERR(inst); | 78 | err = PTR_ERR(inst); |

diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index d80ed4c1e009..3aa6e3834bfe 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
| @@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx) | |||
| 187 | /* Our exported functions */ | 187 | /* Our exported functions */ |
| 188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | 188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) |
| 189 | { | 189 | { |
| 190 | unsigned long flags; | ||
| 191 | unsigned char *ptr = buf; | 190 | unsigned char *ptr = buf; |
| 192 | unsigned int byte_count = (unsigned int)nbytes; | 191 | unsigned int byte_count = (unsigned int)nbytes; |
| 193 | int err; | 192 | int err; |
| @@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | |||
| 196 | if (nbytes < 0) | 195 | if (nbytes < 0) |
| 197 | return -EINVAL; | 196 | return -EINVAL; |
| 198 | 197 | ||
| 199 | spin_lock_irqsave(&ctx->prng_lock, flags); | 198 | spin_lock_bh(&ctx->prng_lock); |
| 200 | 199 | ||
| 201 | err = -EINVAL; | 200 | err = -EINVAL; |
| 202 | if (ctx->flags & PRNG_NEED_RESET) | 201 | if (ctx->flags & PRNG_NEED_RESET) |
| @@ -268,7 +267,7 @@ empty_rbuf: | |||
| 268 | goto remainder; | 267 | goto remainder; |
| 269 | 268 | ||
| 270 | done: | 269 | done: |
| 271 | spin_unlock_irqrestore(&ctx->prng_lock, flags); | 270 | spin_unlock_bh(&ctx->prng_lock); |
| 272 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", | 271 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", |
| 273 | err, ctx); | 272 | err, ctx); |
| 274 | return err; | 273 | return err; |
| @@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx, | |||
| 284 | unsigned char *V, unsigned char *DT) | 283 | unsigned char *V, unsigned char *DT) |
| 285 | { | 284 | { |
| 286 | int ret; | 285 | int ret; |
| 287 | int rc = -EINVAL; | ||
| 288 | unsigned char *prng_key; | 286 | unsigned char *prng_key; |
| 289 | 287 | ||
| 290 | spin_lock(&ctx->prng_lock); | 288 | spin_lock_bh(&ctx->prng_lock); |
| 291 | ctx->flags |= PRNG_NEED_RESET; | 289 | ctx->flags |= PRNG_NEED_RESET; |
| 292 | 290 | ||
| 293 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; | 291 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; |
| @@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx, | |||
| 308 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); | 306 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); |
| 309 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); | 307 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); |
| 310 | 308 | ||
| 311 | if (ctx->tfm) | ||
| 312 | crypto_free_cipher(ctx->tfm); | ||
| 313 | |||
| 314 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
| 315 | if (IS_ERR(ctx->tfm)) { | ||
| 316 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
| 317 | ctx); | ||
| 318 | ctx->tfm = NULL; | ||
| 319 | goto out; | ||
| 320 | } | ||
| 321 | |||
| 322 | ctx->rand_data_valid = DEFAULT_BLK_SZ; | 309 | ctx->rand_data_valid = DEFAULT_BLK_SZ; |
| 323 | 310 | ||
| 324 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); | 311 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); |
| 325 | if (ret) { | 312 | if (ret) { |
| 326 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", | 313 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", |
| 327 | crypto_cipher_get_flags(ctx->tfm)); | 314 | crypto_cipher_get_flags(ctx->tfm)); |
| 328 | crypto_free_cipher(ctx->tfm); | ||
| 329 | goto out; | 315 | goto out; |
| 330 | } | 316 | } |
| 331 | 317 | ||
| 332 | rc = 0; | 318 | ret = 0; |
| 333 | ctx->flags &= ~PRNG_NEED_RESET; | 319 | ctx->flags &= ~PRNG_NEED_RESET; |
| 334 | out: | 320 | out: |
| 335 | spin_unlock(&ctx->prng_lock); | 321 | spin_unlock_bh(&ctx->prng_lock); |
| 336 | 322 | return ret; | |
| 337 | return rc; | ||
| 338 | |||
| 339 | } | 323 | } |
| 340 | 324 | ||
| 341 | static int cprng_init(struct crypto_tfm *tfm) | 325 | static int cprng_init(struct crypto_tfm *tfm) |
| @@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm) | |||
| 343 | struct prng_context *ctx = crypto_tfm_ctx(tfm); | 327 | struct prng_context *ctx = crypto_tfm_ctx(tfm); |
| 344 | 328 | ||
| 345 | spin_lock_init(&ctx->prng_lock); | 329 | spin_lock_init(&ctx->prng_lock); |
| 330 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
| 331 | if (IS_ERR(ctx->tfm)) { | ||
| 332 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
| 333 | ctx); | ||
| 334 | return PTR_ERR(ctx->tfm); | ||
| 335 | } | ||
| 346 | 336 | ||
| 347 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) | 337 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) |
| 348 | return -EINVAL; | 338 | return -EINVAL; |
| @@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = { | |||
| 418 | /* Module initialization */ | 408 | /* Module initialization */ |
| 419 | static int __init prng_mod_init(void) | 409 | static int __init prng_mod_init(void) |
| 420 | { | 410 | { |
| 421 | int ret = 0; | ||
| 422 | |||
| 423 | if (fips_enabled) | 411 | if (fips_enabled) |
| 424 | rng_alg.cra_priority += 200; | 412 | rng_alg.cra_priority += 200; |
| 425 | 413 | ||
| 426 | ret = crypto_register_alg(&rng_alg); | 414 | return crypto_register_alg(&rng_alg); |
| 427 | |||
| 428 | if (ret) | ||
| 429 | goto out; | ||
| 430 | out: | ||
| 431 | return 0; | ||
| 432 | } | 415 | } |
| 433 | 416 | ||
| 434 | static void __exit prng_mod_fini(void) | 417 | static void __exit prng_mod_fini(void) |
diff --git a/crypto/api.c b/crypto/api.c index d5944f92b416..798526d90538 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
| @@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
| 285 | switch (crypto_tfm_alg_type(tfm)) { | 285 | switch (crypto_tfm_alg_type(tfm)) { |
| 286 | case CRYPTO_ALG_TYPE_CIPHER: | 286 | case CRYPTO_ALG_TYPE_CIPHER: |
| 287 | return crypto_init_cipher_ops(tfm); | 287 | return crypto_init_cipher_ops(tfm); |
| 288 | |||
| 289 | case CRYPTO_ALG_TYPE_DIGEST: | ||
| 290 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != | ||
| 291 | CRYPTO_ALG_TYPE_HASH_MASK) | ||
| 292 | return crypto_init_digest_ops_async(tfm); | ||
| 293 | else | ||
| 294 | return crypto_init_digest_ops(tfm); | ||
| 295 | 288 | ||
| 296 | case CRYPTO_ALG_TYPE_COMPRESS: | 289 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 297 | return crypto_init_compress_ops(tfm); | 290 | return crypto_init_compress_ops(tfm); |
| @@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) | |||
| 318 | case CRYPTO_ALG_TYPE_CIPHER: | 311 | case CRYPTO_ALG_TYPE_CIPHER: |
| 319 | crypto_exit_cipher_ops(tfm); | 312 | crypto_exit_cipher_ops(tfm); |
| 320 | break; | 313 | break; |
| 321 | 314 | ||
| 322 | case CRYPTO_ALG_TYPE_DIGEST: | ||
| 323 | crypto_exit_digest_ops(tfm); | ||
| 324 | break; | ||
| 325 | |||
| 326 | case CRYPTO_ALG_TYPE_COMPRESS: | 315 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 327 | crypto_exit_compress_ops(tfm); | 316 | crypto_exit_compress_ops(tfm); |
| 328 | break; | 317 | break; |
| @@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) | |||
| 349 | case CRYPTO_ALG_TYPE_CIPHER: | 338 | case CRYPTO_ALG_TYPE_CIPHER: |
| 350 | len += crypto_cipher_ctxsize(alg); | 339 | len += crypto_cipher_ctxsize(alg); |
| 351 | break; | 340 | break; |
| 352 | 341 | ||
| 353 | case CRYPTO_ALG_TYPE_DIGEST: | ||
| 354 | len += crypto_digest_ctxsize(alg); | ||
| 355 | break; | ||
| 356 | |||
| 357 | case CRYPTO_ALG_TYPE_COMPRESS: | 342 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 358 | len += crypto_compress_ctxsize(alg); | 343 | len += crypto_compress_ctxsize(alg); |
| 359 | break; | 344 | break; |
| @@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
| 472 | int err = -ENOMEM; | 457 | int err = -ENOMEM; |
| 473 | 458 | ||
| 474 | tfmsize = frontend->tfmsize; | 459 | tfmsize = frontend->tfmsize; |
| 475 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); | 460 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); |
| 476 | 461 | ||
| 477 | mem = kzalloc(total, GFP_KERNEL); | 462 | mem = kzalloc(total, GFP_KERNEL); |
| 478 | if (mem == NULL) | 463 | if (mem == NULL) |
| @@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
| 481 | tfm = (struct crypto_tfm *)(mem + tfmsize); | 466 | tfm = (struct crypto_tfm *)(mem + tfmsize); |
| 482 | tfm->__crt_alg = alg; | 467 | tfm->__crt_alg = alg; |
| 483 | 468 | ||
| 484 | err = frontend->init_tfm(tfm, frontend); | 469 | err = frontend->init_tfm(tfm); |
| 485 | if (err) | 470 | if (err) |
| 486 | goto out_free_tfm; | 471 | goto out_free_tfm; |
| 487 | 472 | ||
| @@ -503,6 +488,27 @@ out: | |||
| 503 | } | 488 | } |
| 504 | EXPORT_SYMBOL_GPL(crypto_create_tfm); | 489 | EXPORT_SYMBOL_GPL(crypto_create_tfm); |
| 505 | 490 | ||
| 491 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
| 492 | const struct crypto_type *frontend, | ||
| 493 | u32 type, u32 mask) | ||
| 494 | { | ||
| 495 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = | ||
| 496 | crypto_alg_mod_lookup; | ||
| 497 | |||
| 498 | if (frontend) { | ||
| 499 | type &= frontend->maskclear; | ||
| 500 | mask &= frontend->maskclear; | ||
| 501 | type |= frontend->type; | ||
| 502 | mask |= frontend->maskset; | ||
| 503 | |||
| 504 | if (frontend->lookup) | ||
| 505 | lookup = frontend->lookup; | ||
| 506 | } | ||
| 507 | |||
| 508 | return lookup(alg_name, type, mask); | ||
| 509 | } | ||
| 510 | EXPORT_SYMBOL_GPL(crypto_find_alg); | ||
| 511 | |||
| 506 | /* | 512 | /* |
| 507 | * crypto_alloc_tfm - Locate algorithm and allocate transform | 513 | * crypto_alloc_tfm - Locate algorithm and allocate transform |
| 508 | * @alg_name: Name of algorithm | 514 | * @alg_name: Name of algorithm |
| @@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm); | |||
| 526 | void *crypto_alloc_tfm(const char *alg_name, | 532 | void *crypto_alloc_tfm(const char *alg_name, |
| 527 | const struct crypto_type *frontend, u32 type, u32 mask) | 533 | const struct crypto_type *frontend, u32 type, u32 mask) |
| 528 | { | 534 | { |
| 529 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | ||
| 530 | void *tfm; | 535 | void *tfm; |
| 531 | int err; | 536 | int err; |
| 532 | 537 | ||
| 533 | type &= frontend->maskclear; | ||
| 534 | mask &= frontend->maskclear; | ||
| 535 | type |= frontend->type; | ||
| 536 | mask |= frontend->maskset; | ||
| 537 | |||
| 538 | lookup = frontend->lookup ?: crypto_alg_mod_lookup; | ||
| 539 | |||
| 540 | for (;;) { | 538 | for (;;) { |
| 541 | struct crypto_alg *alg; | 539 | struct crypto_alg *alg; |
| 542 | 540 | ||
| 543 | alg = lookup(alg_name, type, mask); | 541 | alg = crypto_find_alg(alg_name, frontend, type, mask); |
| 544 | if (IS_ERR(alg)) { | 542 | if (IS_ERR(alg)) { |
| 545 | err = PTR_ERR(alg); | 543 | err = PTR_ERR(alg); |
| 546 | goto err; | 544 | goto err; |
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index d8fb39145986..e28e276ac611 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig | |||
| @@ -14,3 +14,17 @@ config ASYNC_MEMSET | |||
| 14 | tristate | 14 | tristate |
| 15 | select ASYNC_CORE | 15 | select ASYNC_CORE |
| 16 | 16 | ||
| 17 | config ASYNC_PQ | ||
| 18 | tristate | ||
| 19 | select ASYNC_CORE | ||
| 20 | |||
| 21 | config ASYNC_RAID6_RECOV | ||
| 22 | tristate | ||
| 23 | select ASYNC_CORE | ||
| 24 | select ASYNC_PQ | ||
| 25 | |||
| 26 | config ASYNC_TX_DISABLE_PQ_VAL_DMA | ||
| 27 | bool | ||
| 28 | |||
| 29 | config ASYNC_TX_DISABLE_XOR_VAL_DMA | ||
| 30 | bool | ||
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 27baa7d52fbc..d1e0e6f72bc1 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile | |||
| @@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o | |||
| 2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o | 2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o |
| 3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o | 3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o |
| 4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o | 4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o |
| 5 | obj-$(CONFIG_ASYNC_PQ) += async_pq.o | ||
| 6 | obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o | ||
| 7 | obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o | ||
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index ddccfb01c416..0ec1fb69d4ea 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
| @@ -33,28 +33,31 @@ | |||
| 33 | * async_memcpy - attempt to copy memory with a dma engine. | 33 | * async_memcpy - attempt to copy memory with a dma engine. |
| 34 | * @dest: destination page | 34 | * @dest: destination page |
| 35 | * @src: src page | 35 | * @src: src page |
| 36 | * @offset: offset in pages to start transaction | 36 | * @dest_offset: offset into 'dest' to start transaction |
| 37 | * @src_offset: offset into 'src' to start transaction | ||
| 37 | * @len: length in bytes | 38 | * @len: length in bytes |
| 38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, | 39 | * @submit: submission / completion modifiers |
| 39 | * @depend_tx: memcpy depends on the result of this transaction | 40 | * |
| 40 | * @cb_fn: function to call when the memcpy completes | 41 | * honored flags: ASYNC_TX_ACK |
| 41 | * @cb_param: parameter to pass to the callback routine | ||
| 42 | */ | 42 | */ |
| 43 | struct dma_async_tx_descriptor * | 43 | struct dma_async_tx_descriptor * |
| 44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | 44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, |
| 45 | unsigned int src_offset, size_t len, enum async_tx_flags flags, | 45 | unsigned int src_offset, size_t len, |
| 46 | struct dma_async_tx_descriptor *depend_tx, | 46 | struct async_submit_ctl *submit) |
| 47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 48 | { | 47 | { |
| 49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY, | 48 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, |
| 50 | &dest, 1, &src, 1, len); | 49 | &dest, 1, &src, 1, len); |
| 51 | struct dma_device *device = chan ? chan->device : NULL; | 50 | struct dma_device *device = chan ? chan->device : NULL; |
| 52 | struct dma_async_tx_descriptor *tx = NULL; | 51 | struct dma_async_tx_descriptor *tx = NULL; |
| 53 | 52 | ||
| 54 | if (device) { | 53 | if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { |
| 55 | dma_addr_t dma_dest, dma_src; | 54 | dma_addr_t dma_dest, dma_src; |
| 56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 55 | unsigned long dma_prep_flags = 0; |
| 57 | 56 | ||
| 57 | if (submit->cb_fn) | ||
| 58 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
| 59 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 60 | dma_prep_flags |= DMA_PREP_FENCE; | ||
| 58 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, | 61 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, |
| 59 | DMA_FROM_DEVICE); | 62 | DMA_FROM_DEVICE); |
| 60 | 63 | ||
| @@ -67,13 +70,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
| 67 | 70 | ||
| 68 | if (tx) { | 71 | if (tx) { |
| 69 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 72 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
| 70 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 73 | async_tx_submit(chan, tx, submit); |
| 71 | } else { | 74 | } else { |
| 72 | void *dest_buf, *src_buf; | 75 | void *dest_buf, *src_buf; |
| 73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 76 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
| 74 | 77 | ||
| 75 | /* wait for any prerequisite operations */ | 78 | /* wait for any prerequisite operations */ |
| 76 | async_tx_quiesce(&depend_tx); | 79 | async_tx_quiesce(&submit->depend_tx); |
| 77 | 80 | ||
| 78 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; | 81 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; |
| 79 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; | 82 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; |
| @@ -83,26 +86,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
| 83 | kunmap_atomic(dest_buf, KM_USER0); | 86 | kunmap_atomic(dest_buf, KM_USER0); |
| 84 | kunmap_atomic(src_buf, KM_USER1); | 87 | kunmap_atomic(src_buf, KM_USER1); |
| 85 | 88 | ||
| 86 | async_tx_sync_epilog(cb_fn, cb_param); | 89 | async_tx_sync_epilog(submit); |
| 87 | } | 90 | } |
| 88 | 91 | ||
| 89 | return tx; | 92 | return tx; |
| 90 | } | 93 | } |
| 91 | EXPORT_SYMBOL_GPL(async_memcpy); | 94 | EXPORT_SYMBOL_GPL(async_memcpy); |
| 92 | 95 | ||
| 93 | static int __init async_memcpy_init(void) | ||
| 94 | { | ||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | |||
| 98 | static void __exit async_memcpy_exit(void) | ||
| 99 | { | ||
| 100 | do { } while (0); | ||
| 101 | } | ||
| 102 | |||
| 103 | module_init(async_memcpy_init); | ||
| 104 | module_exit(async_memcpy_exit); | ||
| 105 | |||
| 106 | MODULE_AUTHOR("Intel Corporation"); | 96 | MODULE_AUTHOR("Intel Corporation"); |
| 107 | MODULE_DESCRIPTION("asynchronous memcpy api"); | 97 | MODULE_DESCRIPTION("asynchronous memcpy api"); |
| 108 | MODULE_LICENSE("GPL"); | 98 | MODULE_LICENSE("GPL"); |
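This hunk is representative of the whole series: the flags/depend_tx/cb_fn/cb_param argument list collapses into a single struct async_submit_ctl. A minimal caller under the new convention might look like the sketch below; stripe_done(), stripe and the page pointers are placeholders, and init_async_submit() is the helper used throughout the new code further down:

        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        /* flags, dependency, callback, callback argument, scribble buffer */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, stripe_done, stripe, NULL);
        tx = async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE, &submit);

Note that ASYNC_TX_DEP_ACK has no equivalent in the new interface; as the async_tx.c hunk below shows, a dependency passed in submit->depend_tx is now acknowledged unconditionally.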
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 5b5eb99bb244..58e4a8752aee 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c | |||
| @@ -35,26 +35,26 @@ | |||
| 35 | * @val: fill value | 35 | * @val: fill value |
| 36 | * @offset: offset into 'dest' to start transaction | 36 | * @offset: offset into 'dest' to start transaction |
| 37 | * @len: length in bytes | 37 | * @len: length in bytes |
| 38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 38 | * |
| 39 | * @depend_tx: memset depends on the result of this transaction | 39 | * honored flags: ASYNC_TX_ACK |
| 40 | * @cb_fn: function to call when the memcpy completes | ||
| 41 | * @cb_param: parameter to pass to the callback routine | ||
| 42 | */ | 40 | */ |
| 43 | struct dma_async_tx_descriptor * | 41 | struct dma_async_tx_descriptor * |
| 44 | async_memset(struct page *dest, int val, unsigned int offset, | 42 | async_memset(struct page *dest, int val, unsigned int offset, size_t len, |
| 45 | size_t len, enum async_tx_flags flags, | 43 | struct async_submit_ctl *submit) |
| 46 | struct dma_async_tx_descriptor *depend_tx, | ||
| 47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 48 | { | 44 | { |
| 49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET, | 45 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, |
| 50 | &dest, 1, NULL, 0, len); | 46 | &dest, 1, NULL, 0, len); |
| 51 | struct dma_device *device = chan ? chan->device : NULL; | 47 | struct dma_device *device = chan ? chan->device : NULL; |
| 52 | struct dma_async_tx_descriptor *tx = NULL; | 48 | struct dma_async_tx_descriptor *tx = NULL; |
| 53 | 49 | ||
| 54 | if (device) { | 50 | if (device && is_dma_fill_aligned(device, offset, 0, len)) { |
| 55 | dma_addr_t dma_dest; | 51 | dma_addr_t dma_dest; |
| 56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 52 | unsigned long dma_prep_flags = 0; |
| 57 | 53 | ||
| 54 | if (submit->cb_fn) | ||
| 55 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
| 56 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 57 | dma_prep_flags |= DMA_PREP_FENCE; | ||
| 58 | dma_dest = dma_map_page(device->dev, dest, offset, len, | 58 | dma_dest = dma_map_page(device->dev, dest, offset, len, |
| 59 | DMA_FROM_DEVICE); | 59 | DMA_FROM_DEVICE); |
| 60 | 60 | ||
| @@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset, | |||
| 64 | 64 | ||
| 65 | if (tx) { | 65 | if (tx) { |
| 66 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 66 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
| 67 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 67 | async_tx_submit(chan, tx, submit); |
| 68 | } else { /* run the memset synchronously */ | 68 | } else { /* run the memset synchronously */ |
| 69 | void *dest_buf; | 69 | void *dest_buf; |
| 70 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 70 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
| 71 | 71 | ||
| 72 | dest_buf = (void *) (((char *) page_address(dest)) + offset); | 72 | dest_buf = page_address(dest) + offset; |
| 73 | 73 | ||
| 74 | /* wait for any prerequisite operations */ | 74 | /* wait for any prerequisite operations */ |
| 75 | async_tx_quiesce(&depend_tx); | 75 | async_tx_quiesce(&submit->depend_tx); |
| 76 | 76 | ||
| 77 | memset(dest_buf, val, len); | 77 | memset(dest_buf, val, len); |
| 78 | 78 | ||
| 79 | async_tx_sync_epilog(cb_fn, cb_param); | 79 | async_tx_sync_epilog(submit); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | return tx; | 82 | return tx; |
| 83 | } | 83 | } |
| 84 | EXPORT_SYMBOL_GPL(async_memset); | 84 | EXPORT_SYMBOL_GPL(async_memset); |
| 85 | 85 | ||
| 86 | static int __init async_memset_init(void) | ||
| 87 | { | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | static void __exit async_memset_exit(void) | ||
| 92 | { | ||
| 93 | do { } while (0); | ||
| 94 | } | ||
| 95 | |||
| 96 | module_init(async_memset_init); | ||
| 97 | module_exit(async_memset_exit); | ||
| 98 | |||
| 99 | MODULE_AUTHOR("Intel Corporation"); | 86 | MODULE_AUTHOR("Intel Corporation"); |
| 100 | MODULE_DESCRIPTION("asynchronous memset api"); | 87 | MODULE_DESCRIPTION("asynchronous memset api"); |
| 101 | MODULE_LICENSE("GPL"); | 88 | MODULE_LICENSE("GPL"); |
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c new file mode 100644 index 000000000000..ec87f53d5059 --- /dev/null +++ b/crypto/async_tx/async_pq.c | |||
| @@ -0,0 +1,415 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com> | ||
| 3 | * Copyright(c) 2009 Intel Corporation | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License as published by the Free | ||
| 7 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 8 | * any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License along with | ||
| 16 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 17 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 18 | * | ||
| 19 | * The full GNU General Public License is included in this distribution in the | ||
| 20 | * file called COPYING. | ||
| 21 | */ | ||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/dma-mapping.h> | ||
| 25 | #include <linux/raid/pq.h> | ||
| 26 | #include <linux/async_tx.h> | ||
| 27 | |||
| 28 | /* | ||
| 29 | * pq_scribble_page - space to hold throwaway P or Q buffer for | ||
| 30 | * synchronous gen_syndrome | ||
| 31 | */ | ||
| 32 | static struct page *pq_scribble_page; | ||
| 33 | |||
| 34 | /* the struct page *blocks[] parameter passed to async_gen_syndrome() | ||
| 35 | * and async_syndrome_val() contains the 'P' destination address at | ||
| 36 | * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] | ||
| 37 | * | ||
| 38 | * note: these are macros as they are used as lvalues | ||
| 39 | */ | ||
| 40 | #define P(b, d) (b[d-2]) | ||
| 41 | #define Q(b, d) (b[d-1]) | ||
| 42 | |||
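As a concrete (hypothetical) illustration of the layout described above, for a six-disk array:

        /* disks == 6: blocks[0..3] are data, blocks[4] is P, blocks[5] is Q */
        struct page *blocks[6] = { d0, d1, d2, d3, p, q };
        /* P(blocks, 6) expands to blocks[4]; Q(blocks, 6) expands to blocks[5] */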
| 43 | /** | ||
| 44 | * do_async_gen_syndrome - asynchronously calculate P and/or Q | ||
| 45 | */ | ||
| 46 | static __async_inline struct dma_async_tx_descriptor * | ||
| 47 | do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | ||
| 48 | const unsigned char *scfs, unsigned int offset, int disks, | ||
| 49 | size_t len, dma_addr_t *dma_src, | ||
| 50 | struct async_submit_ctl *submit) | ||
| 51 | { | ||
| 52 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 53 | struct dma_device *dma = chan->device; | ||
| 54 | enum dma_ctrl_flags dma_flags = 0; | ||
| 55 | enum async_tx_flags flags_orig = submit->flags; | ||
| 56 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
| 57 | void *cb_param_orig = submit->cb_param; | ||
| 58 | int src_cnt = disks - 2; | ||
| 59 | unsigned char coefs[src_cnt]; | ||
| 60 | unsigned short pq_src_cnt; | ||
| 61 | dma_addr_t dma_dest[2]; | ||
| 62 | int src_off = 0; | ||
| 63 | int idx; | ||
| 64 | int i; | ||
| 65 | |||
| 66 | /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ | ||
| 67 | if (P(blocks, disks)) | ||
| 68 | dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, | ||
| 69 | len, DMA_BIDIRECTIONAL); | ||
| 70 | else | ||
| 71 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
| 72 | if (Q(blocks, disks)) | ||
| 73 | dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, | ||
| 74 | len, DMA_BIDIRECTIONAL); | ||
| 75 | else | ||
| 76 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
| 77 | |||
| 78 | /* convert source addresses being careful to collapse 'empty' | ||
| 79 | * sources and update the coefficients accordingly | ||
| 80 | */ | ||
| 81 | for (i = 0, idx = 0; i < src_cnt; i++) { | ||
| 82 | if (blocks[i] == NULL) | ||
| 83 | continue; | ||
| 84 | dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, | ||
| 85 | DMA_TO_DEVICE); | ||
| 86 | coefs[idx] = scfs[i]; | ||
| 87 | idx++; | ||
| 88 | } | ||
| 89 | src_cnt = idx; | ||
| 90 | |||
| 91 | while (src_cnt > 0) { | ||
| 92 | submit->flags = flags_orig; | ||
| 93 | pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); | ||
| 94 | /* if we are submitting additional pqs, leave the chain open, | ||
| 95 | * clear the callback parameters, and leave the destination | ||
| 96 | * buffers mapped | ||
| 97 | */ | ||
| 98 | if (src_cnt > pq_src_cnt) { | ||
| 99 | submit->flags &= ~ASYNC_TX_ACK; | ||
| 100 | submit->flags |= ASYNC_TX_FENCE; | ||
| 101 | dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; | ||
| 102 | submit->cb_fn = NULL; | ||
| 103 | submit->cb_param = NULL; | ||
| 104 | } else { | ||
| 105 | dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; | ||
| 106 | submit->cb_fn = cb_fn_orig; | ||
| 107 | submit->cb_param = cb_param_orig; | ||
| 108 | if (cb_fn_orig) | ||
| 109 | dma_flags |= DMA_PREP_INTERRUPT; | ||
| 110 | } | ||
| 111 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 112 | dma_flags |= DMA_PREP_FENCE; | ||
| 113 | |||
| 114 | /* Since we have clobbered the src_list we are committed | ||
| 115 | * to doing this asynchronously. Drivers force forward | ||
| 116 | * progress in case they can not provide a descriptor | ||
| 117 | */ | ||
| 118 | for (;;) { | ||
| 119 | tx = dma->device_prep_dma_pq(chan, dma_dest, | ||
| 120 | &dma_src[src_off], | ||
| 121 | pq_src_cnt, | ||
| 122 | &coefs[src_off], len, | ||
| 123 | dma_flags); | ||
| 124 | if (likely(tx)) | ||
| 125 | break; | ||
| 126 | async_tx_quiesce(&submit->depend_tx); | ||
| 127 | dma_async_issue_pending(chan); | ||
| 128 | } | ||
| 129 | |||
| 130 | async_tx_submit(chan, tx, submit); | ||
| 131 | submit->depend_tx = tx; | ||
| 132 | |||
| 133 | /* drop completed sources */ | ||
| 134 | src_cnt -= pq_src_cnt; | ||
| 135 | src_off += pq_src_cnt; | ||
| 136 | |||
| 137 | dma_flags |= DMA_PREP_CONTINUE; | ||
| 138 | } | ||
| 139 | |||
| 140 | return tx; | ||
| 141 | } | ||
| 142 | |||
| 143 | /** | ||
| 144 | * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome | ||
| 145 | */ | ||
| 146 | static void | ||
| 147 | do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
| 148 | size_t len, struct async_submit_ctl *submit) | ||
| 149 | { | ||
| 150 | void **srcs; | ||
| 151 | int i; | ||
| 152 | |||
| 153 | if (submit->scribble) | ||
| 154 | srcs = submit->scribble; | ||
| 155 | else | ||
| 156 | srcs = (void **) blocks; | ||
| 157 | |||
| 158 | for (i = 0; i < disks; i++) { | ||
| 159 | if (blocks[i] == NULL) { | ||
| 160 | BUG_ON(i > disks - 3); /* P or Q can't be zero */ | ||
| 161 | srcs[i] = (void *)raid6_empty_zero_page; | ||
| 162 | } else | ||
| 163 | srcs[i] = page_address(blocks[i]) + offset; | ||
| 164 | } | ||
| 165 | raid6_call.gen_syndrome(disks, len, srcs); | ||
| 166 | async_tx_sync_epilog(submit); | ||
| 167 | } | ||
| 168 | |||
| 169 | /** | ||
| 170 | * async_gen_syndrome - asynchronously calculate a raid6 syndrome | ||
| 171 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
| 172 | * @offset: common offset into each block (src and dest) to start transaction | ||
| 173 | * @disks: number of blocks (including missing P or Q, see below) | ||
| 174 | * @len: length of operation in bytes | ||
| 175 | * @submit: submission/completion modifiers | ||
| 176 | * | ||
| 177 | * General note: This routine assumes a field of GF(2^8) with a | ||
| 178 | * primitive polynomial of 0x11d and a generator of {02}. | ||
| 179 | * | ||
| 180 | * 'disks' note: callers can optionally omit either P or Q (but not | ||
| 181 | * both) from the calculation by setting blocks[disks-2] or | ||
| 182 | * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= | ||
| 183 | * PAGE_SIZE as a temporary buffer of this size is used in the | ||
| 184 | * synchronous path. 'disks' always accounts for both destination | ||
| 185 | * buffers. If any source buffers (blocks[i] where i < disks - 2) are | ||
| 186 | * set to NULL those buffers will be replaced with the raid6_empty_zero_page | ||
| 187 | * in the synchronous path and omitted in the hardware-asynchronous | ||
| 188 | * path. | ||
| 189 | * | ||
| 190 | * 'blocks' note: if submit->scribble is NULL then the contents of | ||
| 191 | * 'blocks' may be overwritten to perform address conversions | ||
| 192 | * (dma_map_page() or page_address()). | ||
| 193 | */ | ||
| 194 | struct dma_async_tx_descriptor * | ||
| 195 | async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
| 196 | size_t len, struct async_submit_ctl *submit) | ||
| 197 | { | ||
| 198 | int src_cnt = disks - 2; | ||
| 199 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
| 200 | &P(blocks, disks), 2, | ||
| 201 | blocks, src_cnt, len); | ||
| 202 | struct dma_device *device = chan ? chan->device : NULL; | ||
| 203 | dma_addr_t *dma_src = NULL; | ||
| 204 | |||
| 205 | BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); | ||
| 206 | |||
| 207 | if (submit->scribble) | ||
| 208 | dma_src = submit->scribble; | ||
| 209 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
| 210 | dma_src = (dma_addr_t *) blocks; | ||
| 211 | |||
| 212 | if (dma_src && device && | ||
| 213 | (src_cnt <= dma_maxpq(device, 0) || | ||
| 214 | dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && | ||
| 215 | is_dma_pq_aligned(device, offset, 0, len)) { | ||
| 216 | /* run the p+q asynchronously */ | ||
| 217 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
| 218 | __func__, disks, len); | ||
| 219 | return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, | ||
| 220 | disks, len, dma_src, submit); | ||
| 221 | } | ||
| 222 | |||
| 223 | /* run the pq synchronously */ | ||
| 224 | pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); | ||
| 225 | |||
| 226 | /* wait for any prerequisite operations */ | ||
| 227 | async_tx_quiesce(&submit->depend_tx); | ||
| 228 | |||
| 229 | if (!P(blocks, disks)) { | ||
| 230 | P(blocks, disks) = pq_scribble_page; | ||
| 231 | BUG_ON(len + offset > PAGE_SIZE); | ||
| 232 | } | ||
| 233 | if (!Q(blocks, disks)) { | ||
| 234 | Q(blocks, disks) = pq_scribble_page; | ||
| 235 | BUG_ON(len + offset > PAGE_SIZE); | ||
| 236 | } | ||
| 237 | do_sync_gen_syndrome(blocks, offset, disks, len, submit); | ||
| 238 | |||
| 239 | return NULL; | ||
| 240 | } | ||
| 241 | EXPORT_SYMBOL_GPL(async_gen_syndrome); | ||
| 242 | |||
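A sketch of a caller generating P and Q for one stripe under the notes above. The page pointers and the completion callback are hypothetical, and addr_conv_t is assumed to be the scribble element type the async_tx header provides for exactly this address-conversion purpose:

        struct page *blocks[6] = { d0, d1, d2, d3, p, q }; /* four data pages + P + Q */
        struct async_submit_ctl submit;
        addr_conv_t scribble[6];        /* keeps blocks[] from being clobbered */
        struct dma_async_tx_descriptor *tx;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, stripe_done, stripe, scribble);
        tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);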
| 243 | static inline struct dma_chan * | ||
| 244 | pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) | ||
| 245 | { | ||
| 246 | #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA | ||
| 247 | return NULL; | ||
| 248 | #endif | ||
| 249 | return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks, | ||
| 250 | disks, len); | ||
| 251 | } | ||
| 252 | |||
| 253 | /** | ||
| 254 | * async_syndrome_val - asynchronously validate a raid6 syndrome | ||
| 255 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
| 256 | * @offset: common offset into each block (src and dest) to start transaction | ||
| 257 | * @disks: number of blocks (including missing P or Q, see below) | ||
| 258 | * @len: length of operation in bytes | ||
| 259 | * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set | ||
| 260 | * @spare: temporary result buffer for the synchronous case | ||
| 261 | * @submit: submission / completion modifiers | ||
| 262 | * | ||
| 263 | * The same notes from async_gen_syndrome apply to the 'blocks', | ||
| 264 | * and 'disks' parameters of this routine. The synchronous path | ||
| 265 | * requires a temporary result buffer and submit->scribble to be | ||
| 266 | * specified. | ||
| 267 | */ | ||
| 268 | struct dma_async_tx_descriptor * | ||
| 269 | async_syndrome_val(struct page **blocks, unsigned int offset, int disks, | ||
| 270 | size_t len, enum sum_check_flags *pqres, struct page *spare, | ||
| 271 | struct async_submit_ctl *submit) | ||
| 272 | { | ||
| 273 | struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); | ||
| 274 | struct dma_device *device = chan ? chan->device : NULL; | ||
| 275 | struct dma_async_tx_descriptor *tx; | ||
| 276 | unsigned char coefs[disks-2]; | ||
| 277 | enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
| 278 | dma_addr_t *dma_src = NULL; | ||
| 279 | int src_cnt = 0; | ||
| 280 | |||
| 281 | BUG_ON(disks < 4); | ||
| 282 | |||
| 283 | if (submit->scribble) | ||
| 284 | dma_src = submit->scribble; | ||
| 285 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
| 286 | dma_src = (dma_addr_t *) blocks; | ||
| 287 | |||
| 288 | if (dma_src && device && disks <= dma_maxpq(device, 0) && | ||
| 289 | is_dma_pq_aligned(device, offset, 0, len)) { | ||
| 290 | struct device *dev = device->dev; | ||
| 291 | dma_addr_t *pq = &dma_src[disks-2]; | ||
| 292 | int i; | ||
| 293 | |||
| 294 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
| 295 | __func__, disks, len); | ||
| 296 | if (!P(blocks, disks)) | ||
| 297 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
| 298 | else | ||
| 299 | pq[0] = dma_map_page(dev, P(blocks, disks), | ||
| 300 | offset, len, | ||
| 301 | DMA_TO_DEVICE); | ||
| 302 | if (!Q(blocks, disks)) | ||
| 303 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
| 304 | else | ||
| 305 | pq[1] = dma_map_page(dev, Q(blocks, disks), | ||
| 306 | offset, len, | ||
| 307 | DMA_TO_DEVICE); | ||
| 308 | |||
| 309 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 310 | dma_flags |= DMA_PREP_FENCE; | ||
| 311 | for (i = 0; i < disks-2; i++) | ||
| 312 | if (likely(blocks[i])) { | ||
| 313 | dma_src[src_cnt] = dma_map_page(dev, blocks[i], | ||
| 314 | offset, len, | ||
| 315 | DMA_TO_DEVICE); | ||
| 316 | coefs[src_cnt] = raid6_gfexp[i]; | ||
| 317 | src_cnt++; | ||
| 318 | } | ||
| 319 | |||
| 320 | for (;;) { | ||
| 321 | tx = device->device_prep_dma_pq_val(chan, pq, dma_src, | ||
| 322 | src_cnt, | ||
| 323 | coefs, | ||
| 324 | len, pqres, | ||
| 325 | dma_flags); | ||
| 326 | if (likely(tx)) | ||
| 327 | break; | ||
| 328 | async_tx_quiesce(&submit->depend_tx); | ||
| 329 | dma_async_issue_pending(chan); | ||
| 330 | } | ||
| 331 | async_tx_submit(chan, tx, submit); | ||
| 332 | |||
| 333 | return tx; | ||
| 334 | } else { | ||
| 335 | struct page *p_src = P(blocks, disks); | ||
| 336 | struct page *q_src = Q(blocks, disks); | ||
| 337 | enum async_tx_flags flags_orig = submit->flags; | ||
| 338 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
| 339 | void *scribble = submit->scribble; | ||
| 340 | void *cb_param_orig = submit->cb_param; | ||
| 341 | void *p, *q, *s; | ||
| 342 | |||
| 343 | pr_debug("%s: (sync) disks: %d len: %zu\n", | ||
| 344 | __func__, disks, len); | ||
| 345 | |||
| 346 | /* caller must provide a temporary result buffer and | ||
| 347 | * allow the input parameters to be preserved | ||
| 348 | */ | ||
| 349 | BUG_ON(!spare || !scribble); | ||
| 350 | |||
| 351 | /* wait for any prerequisite operations */ | ||
| 352 | async_tx_quiesce(&submit->depend_tx); | ||
| 353 | |||
| 354 | /* recompute p and/or q into the temporary buffer and then | ||
| 355 | * check to see the result matches the current value | ||
| 356 | */ | ||
| 357 | tx = NULL; | ||
| 358 | *pqres = 0; | ||
| 359 | if (p_src) { | ||
| 360 | init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
| 361 | NULL, NULL, scribble); | ||
| 362 | tx = async_xor(spare, blocks, offset, disks-2, len, submit); | ||
| 363 | async_tx_quiesce(&tx); | ||
| 364 | p = page_address(p_src) + offset; | ||
| 365 | s = page_address(spare) + offset; | ||
| 366 | *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; | ||
| 367 | } | ||
| 368 | |||
| 369 | if (q_src) { | ||
| 370 | P(blocks, disks) = NULL; | ||
| 371 | Q(blocks, disks) = spare; | ||
| 372 | init_async_submit(submit, 0, NULL, NULL, NULL, scribble); | ||
| 373 | tx = async_gen_syndrome(blocks, offset, disks, len, submit); | ||
| 374 | async_tx_quiesce(&tx); | ||
| 375 | q = page_address(q_src) + offset; | ||
| 376 | s = page_address(spare) + offset; | ||
| 377 | *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; | ||
| 378 | } | ||
| 379 | |||
| 380 | /* restore P, Q and submit */ | ||
| 381 | P(blocks, disks) = p_src; | ||
| 382 | Q(blocks, disks) = q_src; | ||
| 383 | |||
| 384 | submit->cb_fn = cb_fn_orig; | ||
| 385 | submit->cb_param = cb_param_orig; | ||
| 386 | submit->flags = flags_orig; | ||
| 387 | async_tx_sync_epilog(submit); | ||
| 388 | |||
| 389 | return NULL; | ||
| 390 | } | ||
| 391 | } | ||
| 392 | EXPORT_SYMBOL_GPL(async_syndrome_val); | ||
| 393 | |||
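A hedged sketch of consuming the validation result, reusing the declarations from the previous sketch; spare is a separately allocated page required by the synchronous path, and SUM_CHECK_P_RESULT / SUM_CHECK_Q_RESULT are the flags named in the comment above (assumed to be the bit forms of the SUM_CHECK_P and SUM_CHECK_Q shifts used in the synchronous branch):

        enum sum_check_flags pqres = 0;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
        tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres, spare, &submit);
        async_tx_quiesce(&tx);          /* wait, then inspect the result */

        if (pqres & SUM_CHECK_P_RESULT)
                pr_err("P does not match the recomputed parity\n");
        if (pqres & SUM_CHECK_Q_RESULT)
                pr_err("Q does not match the recomputed syndrome\n");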
| 394 | static int __init async_pq_init(void) | ||
| 395 | { | ||
| 396 | pq_scribble_page = alloc_page(GFP_KERNEL); | ||
| 397 | |||
| 398 | if (pq_scribble_page) | ||
| 399 | return 0; | ||
| 400 | |||
| 401 | pr_err("%s: failed to allocate required spare page\n", __func__); | ||
| 402 | |||
| 403 | return -ENOMEM; | ||
| 404 | } | ||
| 405 | |||
| 406 | static void __exit async_pq_exit(void) | ||
| 407 | { | ||
| 408 | put_page(pq_scribble_page); | ||
| 409 | } | ||
| 410 | |||
| 411 | module_init(async_pq_init); | ||
| 412 | module_exit(async_pq_exit); | ||
| 413 | |||
| 414 | MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); | ||
| 415 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c new file mode 100644 index 000000000000..943f2abac9b4 --- /dev/null +++ b/crypto/async_tx/async_raid6_recov.c | |||
| @@ -0,0 +1,500 @@ | |||
| 1 | /* | ||
| 2 | * Asynchronous RAID-6 recovery calculations ASYNC_TX API. | ||
| 3 | * Copyright(c) 2009 Intel Corporation | ||
| 4 | * | ||
| 5 | * based on raid6recov.c: | ||
| 6 | * Copyright 2002 H. Peter Anvin | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 51 | ||
| 20 | * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/dma-mapping.h> | ||
| 26 | #include <linux/raid/pq.h> | ||
| 27 | #include <linux/async_tx.h> | ||
| 28 | |||
| 29 | static struct dma_async_tx_descriptor * | ||
| 30 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | ||
| 31 | size_t len, struct async_submit_ctl *submit) | ||
| 32 | { | ||
| 33 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
| 34 | &dest, 1, srcs, 2, len); | ||
| 35 | struct dma_device *dma = chan ? chan->device : NULL; | ||
| 36 | const u8 *amul, *bmul; | ||
| 37 | u8 ax, bx; | ||
| 38 | u8 *a, *b, *c; | ||
| 39 | |||
| 40 | if (dma) { | ||
| 41 | dma_addr_t dma_dest[2]; | ||
| 42 | dma_addr_t dma_src[2]; | ||
| 43 | struct device *dev = dma->dev; | ||
| 44 | struct dma_async_tx_descriptor *tx; | ||
| 45 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
| 46 | |||
| 47 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 48 | dma_flags |= DMA_PREP_FENCE; | ||
| 49 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
| 50 | dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); | ||
| 51 | dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); | ||
| 52 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, | ||
| 53 | len, dma_flags); | ||
| 54 | if (tx) { | ||
| 55 | async_tx_submit(chan, tx, submit); | ||
| 56 | return tx; | ||
| 57 | } | ||
| 58 | |||
| 59 | /* could not get a descriptor, unmap and fall through to | ||
| 60 | * the synchronous path | ||
| 61 | */ | ||
| 62 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | ||
| 63 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
| 64 | dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); | ||
| 65 | } | ||
| 66 | |||
| 67 | /* run the operation synchronously */ | ||
| 68 | async_tx_quiesce(&submit->depend_tx); | ||
| 69 | amul = raid6_gfmul[coef[0]]; | ||
| 70 | bmul = raid6_gfmul[coef[1]]; | ||
| 71 | a = page_address(srcs[0]); | ||
| 72 | b = page_address(srcs[1]); | ||
| 73 | c = page_address(dest); | ||
| 74 | |||
| 75 | while (len--) { | ||
| 76 | ax = amul[*a++]; | ||
| 77 | bx = bmul[*b++]; | ||
| 78 | *c++ = ax ^ bx; | ||
| 79 | } | ||
| 80 | |||
| 81 | return NULL; | ||
| 82 | } | ||
| 83 | |||
| 84 | static struct dma_async_tx_descriptor * | ||
| 85 | async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | ||
| 86 | struct async_submit_ctl *submit) | ||
| 87 | { | ||
| 88 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
| 89 | &dest, 1, &src, 1, len); | ||
| 90 | struct dma_device *dma = chan ? chan->device : NULL; | ||
| 91 | const u8 *qmul; /* Q multiplier table */ | ||
| 92 | u8 *d, *s; | ||
| 93 | |||
| 94 | if (dma) { | ||
| 95 | dma_addr_t dma_dest[2]; | ||
| 96 | dma_addr_t dma_src[1]; | ||
| 97 | struct device *dev = dma->dev; | ||
| 98 | struct dma_async_tx_descriptor *tx; | ||
| 99 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
| 100 | |||
| 101 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 102 | dma_flags |= DMA_PREP_FENCE; | ||
| 103 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
| 104 | dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); | ||
| 105 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, | ||
| 106 | len, dma_flags); | ||
| 107 | if (tx) { | ||
| 108 | async_tx_submit(chan, tx, submit); | ||
| 109 | return tx; | ||
| 110 | } | ||
| 111 | |||
| 112 | /* could not get a descriptor, unmap and fall through to | ||
| 113 | * the synchronous path | ||
| 114 | */ | ||
| 115 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | ||
| 116 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
| 117 | } | ||
| 118 | |||
| 119 | /* no channel available, or failed to allocate a descriptor, so | ||
| 120 | * perform the operation synchronously | ||
| 121 | */ | ||
| 122 | async_tx_quiesce(&submit->depend_tx); | ||
| 123 | qmul = raid6_gfmul[coef]; | ||
| 124 | d = page_address(dest); | ||
| 125 | s = page_address(src); | ||
| 126 | |||
| 127 | while (len--) | ||
| 128 | *d++ = qmul[*s++]; | ||
| 129 | |||
| 130 | return NULL; | ||
| 131 | } | ||
| 132 | |||
| 133 | static struct dma_async_tx_descriptor * | ||
| 134 | __2data_recov_4(int disks, size_t bytes, int faila, int failb, | ||
| 135 | struct page **blocks, struct async_submit_ctl *submit) | ||
| 136 | { | ||
| 137 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 138 | struct page *p, *q, *a, *b; | ||
| 139 | struct page *srcs[2]; | ||
| 140 | unsigned char coef[2]; | ||
| 141 | enum async_tx_flags flags = submit->flags; | ||
| 142 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
| 143 | void *cb_param = submit->cb_param; | ||
| 144 | void *scribble = submit->scribble; | ||
| 145 | |||
| 146 | p = blocks[disks-2]; | ||
| 147 | q = blocks[disks-1]; | ||
| 148 | |||
| 149 | a = blocks[faila]; | ||
| 150 | b = blocks[failb]; | ||
| 151 | |||
| 152 | /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ | ||
| 153 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
| 154 | srcs[0] = p; | ||
| 155 | srcs[1] = q; | ||
| 156 | coef[0] = raid6_gfexi[failb-faila]; | ||
| 157 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
| 158 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 159 | tx = async_sum_product(b, srcs, coef, bytes, submit); | ||
| 160 | |||
| 161 | /* Dy = P+Pxy+Dx */ | ||
| 162 | srcs[0] = p; | ||
| 163 | srcs[1] = b; | ||
| 164 | init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, | ||
| 165 | cb_param, scribble); | ||
| 166 | tx = async_xor(a, srcs, 0, 2, bytes, submit); | ||
| 167 | |||
| 168 | return tx; | ||
| 169 | |||
| 170 | } | ||
| 171 | |||
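The two coefficients above follow from solving the syndrome equations over GF(2^8). With faila = a, failb = b, generator g = {02} and \oplus the field addition (XOR), a derivation in LaTeX notation (mine, not part of the patch):

        P \oplus P_{ab} = D_a \oplus D_b, \qquad
        Q \oplus Q_{ab} = g^{a} D_a \oplus g^{b} D_b

        \Rightarrow\; D_b = (1 \oplus g^{\,b-a})^{-1}(P \oplus P_{ab})
                            \;\oplus\; (g^{a} \oplus g^{b})^{-1}(Q \oplus Q_{ab}),
        \qquad D_a = (P \oplus P_{ab}) \oplus D_b

async_sum_product() computes the D_b line, with coef[0] = raid6_gfexi[failb-faila] playing the role of (1 \oplus g^{b-a})^{-1} (assuming the usual exponent-xor-inverse table) and coef[1] = raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]] the role of (g^{a} \oplus g^{b})^{-1}; the trailing async_xor() is the D_a line. In the four-disk case there are no surviving data blocks, so P_{ab} and Q_{ab} are zero and P, Q feed in directly.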
| 172 | static struct dma_async_tx_descriptor * | ||
| 173 | __2data_recov_5(int disks, size_t bytes, int faila, int failb, | ||
| 174 | struct page **blocks, struct async_submit_ctl *submit) | ||
| 175 | { | ||
| 176 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 177 | struct page *p, *q, *g, *dp, *dq; | ||
| 178 | struct page *srcs[2]; | ||
| 179 | unsigned char coef[2]; | ||
| 180 | enum async_tx_flags flags = submit->flags; | ||
| 181 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
| 182 | void *cb_param = submit->cb_param; | ||
| 183 | void *scribble = submit->scribble; | ||
| 184 | int good_srcs, good, i; | ||
| 185 | |||
| 186 | good_srcs = 0; | ||
| 187 | good = -1; | ||
| 188 | for (i = 0; i < disks-2; i++) { | ||
| 189 | if (blocks[i] == NULL) | ||
| 190 | continue; | ||
| 191 | if (i == faila || i == failb) | ||
| 192 | continue; | ||
| 193 | good = i; | ||
| 194 | good_srcs++; | ||
| 195 | } | ||
| 196 | BUG_ON(good_srcs > 1); | ||
| 197 | |||
| 198 | p = blocks[disks-2]; | ||
| 199 | q = blocks[disks-1]; | ||
| 200 | g = blocks[good]; | ||
| 201 | |||
| 202 | /* Compute syndrome with zero for the missing data pages | ||
| 203 | * Use the dead data pages as temporary storage for delta p and | ||
| 204 | * delta q | ||
| 205 | */ | ||
| 206 | dp = blocks[faila]; | ||
| 207 | dq = blocks[failb]; | ||
| 208 | |||
| 209 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 210 | tx = async_memcpy(dp, g, 0, 0, bytes, submit); | ||
| 211 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 212 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
| 213 | |||
| 214 | /* compute P + Pxy */ | ||
| 215 | srcs[0] = dp; | ||
| 216 | srcs[1] = p; | ||
| 217 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
| 218 | NULL, NULL, scribble); | ||
| 219 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
| 220 | |||
| 221 | /* compute Q + Qxy */ | ||
| 222 | srcs[0] = dq; | ||
| 223 | srcs[1] = q; | ||
| 224 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
| 225 | NULL, NULL, scribble); | ||
| 226 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
| 227 | |||
| 228 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
| 229 | srcs[0] = dp; | ||
| 230 | srcs[1] = dq; | ||
| 231 | coef[0] = raid6_gfexi[failb-faila]; | ||
| 232 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
| 233 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 234 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
| 235 | |||
| 236 | /* Dy = P+Pxy+Dx */ | ||
| 237 | srcs[0] = dp; | ||
| 238 | srcs[1] = dq; | ||
| 239 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
| 240 | cb_param, scribble); | ||
| 241 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
| 242 | |||
| 243 | return tx; | ||
| 244 | } | ||
| 245 | |||
| 246 | static struct dma_async_tx_descriptor * | ||
| 247 | __2data_recov_n(int disks, size_t bytes, int faila, int failb, | ||
| 248 | struct page **blocks, struct async_submit_ctl *submit) | ||
| 249 | { | ||
| 250 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 251 | struct page *p, *q, *dp, *dq; | ||
| 252 | struct page *srcs[2]; | ||
| 253 | unsigned char coef[2]; | ||
| 254 | enum async_tx_flags flags = submit->flags; | ||
| 255 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
| 256 | void *cb_param = submit->cb_param; | ||
| 257 | void *scribble = submit->scribble; | ||
| 258 | |||
| 259 | p = blocks[disks-2]; | ||
| 260 | q = blocks[disks-1]; | ||
| 261 | |||
| 262 | /* Compute syndrome with zero for the missing data pages | ||
| 263 | * Use the dead data pages as temporary storage for | ||
| 264 | * delta p and delta q | ||
| 265 | */ | ||
| 266 | dp = blocks[faila]; | ||
| 267 | blocks[faila] = NULL; | ||
| 268 | blocks[disks-2] = dp; | ||
| 269 | dq = blocks[failb]; | ||
| 270 | blocks[failb] = NULL; | ||
| 271 | blocks[disks-1] = dq; | ||
| 272 | |||
| 273 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 274 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
| 275 | |||
| 276 | /* Restore pointer table */ | ||
| 277 | blocks[faila] = dp; | ||
| 278 | blocks[failb] = dq; | ||
| 279 | blocks[disks-2] = p; | ||
| 280 | blocks[disks-1] = q; | ||
| 281 | |||
| 282 | /* compute P + Pxy */ | ||
| 283 | srcs[0] = dp; | ||
| 284 | srcs[1] = p; | ||
| 285 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
| 286 | NULL, NULL, scribble); | ||
| 287 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
| 288 | |||
| 289 | /* compute Q + Qxy */ | ||
| 290 | srcs[0] = dq; | ||
| 291 | srcs[1] = q; | ||
| 292 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
| 293 | NULL, NULL, scribble); | ||
| 294 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
| 295 | |||
| 296 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
| 297 | srcs[0] = dp; | ||
| 298 | srcs[1] = dq; | ||
| 299 | coef[0] = raid6_gfexi[failb-faila]; | ||
| 300 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
| 301 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 302 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
| 303 | |||
| 304 | /* Dy = P+Pxy+Dx */ | ||
| 305 | srcs[0] = dp; | ||
| 306 | srcs[1] = dq; | ||
| 307 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
| 308 | cb_param, scribble); | ||
| 309 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
| 310 | |||
| 311 | return tx; | ||
| 312 | } | ||
| 313 | |||
| 314 | /** | ||
| 315 | * async_raid6_2data_recov - asynchronously calculate two missing data blocks | ||
| 316 | * @disks: number of disks in the RAID-6 array | ||
| 317 | * @bytes: block size | ||
| 318 | * @faila: first failed drive index | ||
| 319 | * @failb: second failed drive index | ||
| 320 | * @blocks: array of source pointers where the last two entries are p and q | ||
| 321 | * @submit: submission/completion modifiers | ||
| 322 | */ | ||
| 323 | struct dma_async_tx_descriptor * | ||
| 324 | async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
| 325 | struct page **blocks, struct async_submit_ctl *submit) | ||
| 326 | { | ||
| 327 | int non_zero_srcs, i; | ||
| 328 | |||
| 329 | BUG_ON(faila == failb); | ||
| 330 | if (failb < faila) | ||
| 331 | swap(faila, failb); | ||
| 332 | |||
| 333 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
| 334 | |||
| 335 | /* we need to preserve the contents of 'blocks' for the async | ||
| 336 | * case, so punt to synchronous if a scribble buffer is not available | ||
| 337 | */ | ||
| 338 | if (!submit->scribble) { | ||
| 339 | void **ptrs = (void **) blocks; | ||
| 340 | |||
| 341 | async_tx_quiesce(&submit->depend_tx); | ||
| 342 | for (i = 0; i < disks; i++) | ||
| 343 | if (blocks[i] == NULL) | ||
| 344 | ptrs[i] = (void *) raid6_empty_zero_page; | ||
| 345 | else | ||
| 346 | ptrs[i] = page_address(blocks[i]); | ||
| 347 | |||
| 348 | raid6_2data_recov(disks, bytes, faila, failb, ptrs); | ||
| 349 | |||
| 350 | async_tx_sync_epilog(submit); | ||
| 351 | |||
| 352 | return NULL; | ||
| 353 | } | ||
| 354 | |||
| 355 | non_zero_srcs = 0; | ||
| 356 | for (i = 0; i < disks-2 && non_zero_srcs < 4; i++) | ||
| 357 | if (blocks[i]) | ||
| 358 | non_zero_srcs++; | ||
| 359 | switch (non_zero_srcs) { | ||
| 360 | case 0: | ||
| 361 | case 1: | ||
| 362 | /* There must be at least 2 sources - the failed devices. */ | ||
| 363 | BUG(); | ||
| 364 | |||
| 365 | case 2: | ||
| 366 | /* dma devices do not uniformly understand a zero source pq | ||
| 367 | * operation (in contrast to the synchronous case), so | ||
| 368 | * explicitly handle the special case of a 4 disk array with | ||
| 369 | * both data disks missing. | ||
| 370 | */ | ||
| 371 | return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); | ||
| 372 | case 3: | ||
| 373 | /* dma devices do not uniformly understand a single | ||
| 374 | * source pq operation (in contrast to the synchronous | ||
| 375 | * case), so explicitly handle the special case of a 5 disk | ||
| 376 | * array with 2 of 3 data disks missing. | ||
| 377 | */ | ||
| 378 | return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); | ||
| 379 | default: | ||
| 380 | return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); | ||
| 381 | } | ||
| 382 | } | ||
| 383 | EXPORT_SYMBOL_GPL(async_raid6_2data_recov); | ||
| 384 | |||
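A hypothetical degraded-stripe caller, with data blocks 1 and 3 lost in a six-disk array; the failed slots still carry writable pages, only their contents are stale:

        struct page *blocks[6] = { d0, d1, d2, d3, p, q }; /* d1 and d3 to be rebuilt */
        struct async_submit_ctl submit;
        addr_conv_t scribble[6];
        struct dma_async_tx_descriptor *tx;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, recov_done, stripe, scribble);
        tx = async_raid6_2data_recov(6, PAGE_SIZE, 1, 3, blocks, &submit);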
| 385 | /** | ||
| 386 | * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block | ||
| 387 | * @disks: number of disks in the RAID-6 array | ||
| 388 | * @bytes: block size | ||
| 389 | * @faila: failed drive index | ||
| 390 | * @blocks: array of source pointers where the last two entries are p and q | ||
| 391 | * @submit: submission/completion modifiers | ||
| 392 | */ | ||
| 393 | struct dma_async_tx_descriptor * | ||
| 394 | async_raid6_datap_recov(int disks, size_t bytes, int faila, | ||
| 395 | struct page **blocks, struct async_submit_ctl *submit) | ||
| 396 | { | ||
| 397 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 398 | struct page *p, *q, *dq; | ||
| 399 | u8 coef; | ||
| 400 | enum async_tx_flags flags = submit->flags; | ||
| 401 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
| 402 | void *cb_param = submit->cb_param; | ||
| 403 | void *scribble = submit->scribble; | ||
| 404 | int good_srcs, good, i; | ||
| 405 | struct page *srcs[2]; | ||
| 406 | |||
| 407 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
| 408 | |||
| 409 | /* we need to preserve the contents of 'blocks' for the async | ||
| 410 | * case, so punt to synchronous if a scribble buffer is not available | ||
| 411 | */ | ||
| 412 | if (!scribble) { | ||
| 413 | void **ptrs = (void **) blocks; | ||
| 414 | |||
| 415 | async_tx_quiesce(&submit->depend_tx); | ||
| 416 | for (i = 0; i < disks; i++) | ||
| 417 | if (blocks[i] == NULL) | ||
| 418 | ptrs[i] = (void *)raid6_empty_zero_page; | ||
| 419 | else | ||
| 420 | ptrs[i] = page_address(blocks[i]); | ||
| 421 | |||
| 422 | raid6_datap_recov(disks, bytes, faila, ptrs); | ||
| 423 | |||
| 424 | async_tx_sync_epilog(submit); | ||
| 425 | |||
| 426 | return NULL; | ||
| 427 | } | ||
| 428 | |||
| 429 | good_srcs = 0; | ||
| 430 | good = -1; | ||
| 431 | for (i = 0; i < disks-2; i++) { | ||
| 432 | if (i == faila) | ||
| 433 | continue; | ||
| 434 | if (blocks[i]) { | ||
| 435 | good = i; | ||
| 436 | good_srcs++; | ||
| 437 | if (good_srcs > 1) | ||
| 438 | break; | ||
| 439 | } | ||
| 440 | } | ||
| 441 | BUG_ON(good_srcs == 0); | ||
| 442 | |||
| 443 | p = blocks[disks-2]; | ||
| 444 | q = blocks[disks-1]; | ||
| 445 | |||
| 446 | /* Compute syndrome with zero for the missing data page | ||
| 447 | * Use the dead data page as temporary storage for delta q | ||
| 448 | */ | ||
| 449 | dq = blocks[faila]; | ||
| 450 | blocks[faila] = NULL; | ||
| 451 | blocks[disks-1] = dq; | ||
| 452 | |||
| 453 | /* in the 4-disk case we only need to perform a single source | ||
| 454 | * multiplication with the one good data block. | ||
| 455 | */ | ||
| 456 | if (good_srcs == 1) { | ||
| 457 | struct page *g = blocks[good]; | ||
| 458 | |||
| 459 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
| 460 | scribble); | ||
| 461 | tx = async_memcpy(p, g, 0, 0, bytes, submit); | ||
| 462 | |||
| 463 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
| 464 | scribble); | ||
| 465 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
| 466 | } else { | ||
| 467 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
| 468 | scribble); | ||
| 469 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
| 470 | } | ||
| 471 | |||
| 472 | /* Restore pointer table */ | ||
| 473 | blocks[faila] = dq; | ||
| 474 | blocks[disks-1] = q; | ||
| 475 | |||
| 476 | /* calculate g^{-faila} */ | ||
| 477 | coef = raid6_gfinv[raid6_gfexp[faila]]; | ||
| 478 | |||
| 479 | srcs[0] = dq; | ||
| 480 | srcs[1] = q; | ||
| 481 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
| 482 | NULL, NULL, scribble); | ||
| 483 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
| 484 | |||
| 485 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
| 486 | tx = async_mult(dq, dq, coef, bytes, submit); | ||
| 487 | |||
| 488 | srcs[0] = p; | ||
| 489 | srcs[1] = dq; | ||
| 490 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
| 491 | cb_param, scribble); | ||
| 492 | tx = async_xor(p, srcs, 0, 2, bytes, submit); | ||
| 493 | |||
| 494 | return tx; | ||
| 495 | } | ||
| 496 | EXPORT_SYMBOL_GPL(async_raid6_datap_recov); | ||
| 497 | |||
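The recovery above boils down to one Galois multiplication once the partial syndromes are in hand. In LaTeX notation (my summary, with failed index a and P_a, Q_a the syndromes computed with the missing block treated as zero):

        D_a = g^{-a}\,(Q \oplus Q_a), \qquad P = P_a \oplus D_a

The g^{-a} factor is the coef = raid6_gfinv[raid6_gfexp[faila]] multiplication performed by async_mult(); the preceding async_xor() forms Q \oplus Q_a and the final async_xor() rebuilds P.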
| 498 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
| 499 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); | ||
| 500 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 06eb6cc09fef..f9cdf04fe7c0 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
| @@ -42,16 +42,21 @@ static void __exit async_tx_exit(void) | |||
| 42 | async_dmaengine_put(); | 42 | async_dmaengine_put(); |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | module_init(async_tx_init); | ||
| 46 | module_exit(async_tx_exit); | ||
| 47 | |||
| 45 | /** | 48 | /** |
| 46 | * __async_tx_find_channel - find a channel to carry out the operation or let | 49 | * __async_tx_find_channel - find a channel to carry out the operation or let |
| 47 | * the transaction execute synchronously | 50 | * the transaction execute synchronously |
| 48 | * @depend_tx: transaction dependency | 51 | * @submit: transaction dependency and submission modifiers |
| 49 | * @tx_type: transaction type | 52 | * @tx_type: transaction type |
| 50 | */ | 53 | */ |
| 51 | struct dma_chan * | 54 | struct dma_chan * |
| 52 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 55 | __async_tx_find_channel(struct async_submit_ctl *submit, |
| 53 | enum dma_transaction_type tx_type) | 56 | enum dma_transaction_type tx_type) |
| 54 | { | 57 | { |
| 58 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
| 59 | |||
| 55 | /* see if we can keep the chain on one channel */ | 60 | /* see if we can keep the chain on one channel */ |
| 56 | if (depend_tx && | 61 | if (depend_tx && |
| 57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 62 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
| @@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
| 59 | return async_dma_find_channel(tx_type); | 64 | return async_dma_find_channel(tx_type); |
| 60 | } | 65 | } |
| 61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 66 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
| 62 | #else | ||
| 63 | static int __init async_tx_init(void) | ||
| 64 | { | ||
| 65 | printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | static void __exit async_tx_exit(void) | ||
| 70 | { | ||
| 71 | do { } while (0); | ||
| 72 | } | ||
| 73 | #endif | 67 | #endif |
| 74 | 68 | ||
| 75 | 69 | ||
| @@ -83,10 +77,14 @@ static void | |||
| 83 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | 77 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, |
| 84 | struct dma_async_tx_descriptor *tx) | 78 | struct dma_async_tx_descriptor *tx) |
| 85 | { | 79 | { |
| 86 | struct dma_chan *chan; | 80 | struct dma_chan *chan = depend_tx->chan; |
| 87 | struct dma_device *device; | 81 | struct dma_device *device = chan->device; |
| 88 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; | 82 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; |
| 89 | 83 | ||
| 84 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
| 85 | BUG(); | ||
| 86 | #endif | ||
| 87 | |||
| 90 | /* first check to see if we can still append to depend_tx */ | 88 | /* first check to see if we can still append to depend_tx */ |
| 91 | spin_lock_bh(&depend_tx->lock); | 89 | spin_lock_bh(&depend_tx->lock); |
| 92 | if (depend_tx->parent && depend_tx->chan == tx->chan) { | 90 | if (depend_tx->parent && depend_tx->chan == tx->chan) { |
| @@ -96,11 +94,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
| 96 | } | 94 | } |
| 97 | spin_unlock_bh(&depend_tx->lock); | 95 | spin_unlock_bh(&depend_tx->lock); |
| 98 | 96 | ||
| 99 | if (!intr_tx) | 97 | /* attached dependency, flush the parent channel */ |
| 98 | if (!intr_tx) { | ||
| 99 | device->device_issue_pending(chan); | ||
| 100 | return; | 100 | return; |
| 101 | 101 | } | |
| 102 | chan = depend_tx->chan; | ||
| 103 | device = chan->device; | ||
| 104 | 102 | ||
| 105 | /* see if we can schedule an interrupt | 103 | /* see if we can schedule an interrupt |
| 106 | * otherwise poll for completion | 104 | * otherwise poll for completion |
| @@ -134,6 +132,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
| 134 | intr_tx->tx_submit(intr_tx); | 132 | intr_tx->tx_submit(intr_tx); |
| 135 | async_tx_ack(intr_tx); | 133 | async_tx_ack(intr_tx); |
| 136 | } | 134 | } |
| 135 | device->device_issue_pending(chan); | ||
| 137 | } else { | 136 | } else { |
| 138 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 137 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) |
| 139 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 138 | panic("%s: DMA_ERROR waiting for depend_tx\n", |
| @@ -144,13 +143,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
| 144 | 143 | ||
| 145 | 144 | ||
| 146 | /** | 145 | /** |
| 147 | * submit_disposition - while holding depend_tx->lock we must avoid submitting | 146 | * submit_disposition - flags for routing an incoming operation |
| 148 | * new operations to prevent a circular locking dependency with | ||
| 149 | * drivers that already hold a channel lock when calling | ||
| 150 | * async_tx_run_dependencies. | ||
| 151 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock | 147 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock |
| 152 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch | 148 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch |
| 153 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly | 149 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly |
| 150 | * | ||
| 151 | * while holding depend_tx->lock we must avoid submitting new operations | ||
| 152 | * to prevent a circular locking dependency with drivers that already | ||
| 153 | * hold a channel lock when calling async_tx_run_dependencies. | ||
| 154 | */ | 154 | */ |
| 155 | enum submit_disposition { | 155 | enum submit_disposition { |
| 156 | ASYNC_TX_SUBMITTED, | 156 | ASYNC_TX_SUBMITTED, |
| @@ -160,11 +160,12 @@ enum submit_disposition { | |||
| 160 | 160 | ||
| 161 | void | 161 | void |
| 162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | 162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, |
| 163 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, | 163 | struct async_submit_ctl *submit) |
| 164 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 165 | { | 164 | { |
| 166 | tx->callback = cb_fn; | 165 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; |
| 167 | tx->callback_param = cb_param; | 166 | |
| 167 | tx->callback = submit->cb_fn; | ||
| 168 | tx->callback_param = submit->cb_param; | ||
| 168 | 169 | ||
| 169 | if (depend_tx) { | 170 | if (depend_tx) { |
| 170 | enum submit_disposition s; | 171 | enum submit_disposition s; |
| @@ -220,30 +221,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
| 220 | tx->tx_submit(tx); | 221 | tx->tx_submit(tx); |
| 221 | } | 222 | } |
| 222 | 223 | ||
| 223 | if (flags & ASYNC_TX_ACK) | 224 | if (submit->flags & ASYNC_TX_ACK) |
| 224 | async_tx_ack(tx); | 225 | async_tx_ack(tx); |
| 225 | 226 | ||
| 226 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) | 227 | if (depend_tx) |
| 227 | async_tx_ack(depend_tx); | 228 | async_tx_ack(depend_tx); |
| 228 | } | 229 | } |
| 229 | EXPORT_SYMBOL_GPL(async_tx_submit); | 230 | EXPORT_SYMBOL_GPL(async_tx_submit); |
| 230 | 231 | ||
| 231 | /** | 232 | /** |
| 232 | * async_trigger_callback - schedules the callback function to be run after | 233 | * async_trigger_callback - schedules the callback function to be run |
| 233 | * any dependent operations have been completed. | 234 | * @submit: submission and completion parameters |
| 234 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 235 | * |
| 235 | * @depend_tx: 'callback' requires the completion of this transaction | 236 | * honored flags: ASYNC_TX_ACK |
| 236 | * @cb_fn: function to call after depend_tx completes | 237 | * |
| 237 | * @cb_param: parameter to pass to the callback routine | 238 | * The callback is run after any dependent operations have completed. |
| 238 | */ | 239 | */ |
| 239 | struct dma_async_tx_descriptor * | 240 | struct dma_async_tx_descriptor * |
| 240 | async_trigger_callback(enum async_tx_flags flags, | 241 | async_trigger_callback(struct async_submit_ctl *submit) |
| 241 | struct dma_async_tx_descriptor *depend_tx, | ||
| 242 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 243 | { | 242 | { |
| 244 | struct dma_chan *chan; | 243 | struct dma_chan *chan; |
| 245 | struct dma_device *device; | 244 | struct dma_device *device; |
| 246 | struct dma_async_tx_descriptor *tx; | 245 | struct dma_async_tx_descriptor *tx; |
| 246 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
| 247 | 247 | ||
| 248 | if (depend_tx) { | 248 | if (depend_tx) { |
| 249 | chan = depend_tx->chan; | 249 | chan = depend_tx->chan; |
| @@ -262,14 +262,14 @@ async_trigger_callback(enum async_tx_flags flags, | |||
| 262 | if (tx) { | 262 | if (tx) { |
| 263 | pr_debug("%s: (async)\n", __func__); | 263 | pr_debug("%s: (async)\n", __func__); |
| 264 | 264 | ||
| 265 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 265 | async_tx_submit(chan, tx, submit); |
| 266 | } else { | 266 | } else { |
| 267 | pr_debug("%s: (sync)\n", __func__); | 267 | pr_debug("%s: (sync)\n", __func__); |
| 268 | 268 | ||
| 269 | /* wait for any prerequisite operations */ | 269 | /* wait for any prerequisite operations */ |
| 270 | async_tx_quiesce(&depend_tx); | 270 | async_tx_quiesce(&submit->depend_tx); |
| 271 | 271 | ||
| 272 | async_tx_sync_epilog(cb_fn, cb_param); | 272 | async_tx_sync_epilog(submit); |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | return tx; | 275 | return tx; |
| @@ -295,9 +295,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
| 295 | } | 295 | } |
| 296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | 296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); |
| 297 | 297 | ||
| 298 | module_init(async_tx_init); | ||
| 299 | module_exit(async_tx_exit); | ||
| 300 | |||
| 301 | MODULE_AUTHOR("Intel Corporation"); | 298 | MODULE_AUTHOR("Intel Corporation"); |
| 302 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); | 299 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); |
| 303 | MODULE_LICENSE("GPL"); | 300 | MODULE_LICENSE("GPL"); |
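async_trigger_callback() keeps the same shape under the new API; a minimal sketch, with tx a previously returned descriptor and stripe_done()/stripe placeholders:

        struct async_submit_ctl submit;

        init_async_submit(&submit, ASYNC_TX_ACK, tx, stripe_done, stripe, NULL);
        async_trigger_callback(&submit);

If no interrupt-capable channel is available, the callback simply runs synchronously after async_tx_quiesce(), exactly as in the sync branch of the hunk above.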
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 90dd3f8bd283..079ae8ca590b 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c | |||
| @@ -33,55 +33,57 @@ | |||
| 33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ | 33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ |
| 34 | static __async_inline struct dma_async_tx_descriptor * | 34 | static __async_inline struct dma_async_tx_descriptor * |
| 35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | 35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, |
| 36 | unsigned int offset, int src_cnt, size_t len, | 36 | unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, |
| 37 | enum async_tx_flags flags, | 37 | struct async_submit_ctl *submit) |
| 38 | struct dma_async_tx_descriptor *depend_tx, | ||
| 39 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 40 | { | 38 | { |
| 41 | struct dma_device *dma = chan->device; | 39 | struct dma_device *dma = chan->device; |
| 42 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | ||
| 43 | struct dma_async_tx_descriptor *tx = NULL; | 40 | struct dma_async_tx_descriptor *tx = NULL; |
| 44 | int src_off = 0; | 41 | int src_off = 0; |
| 45 | int i; | 42 | int i; |
| 46 | dma_async_tx_callback _cb_fn; | 43 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; |
| 47 | void *_cb_param; | 44 | void *cb_param_orig = submit->cb_param; |
| 48 | enum async_tx_flags async_flags; | 45 | enum async_tx_flags flags_orig = submit->flags; |
| 49 | enum dma_ctrl_flags dma_flags; | 46 | enum dma_ctrl_flags dma_flags; |
| 50 | int xor_src_cnt; | 47 | int xor_src_cnt = 0; |
| 51 | dma_addr_t dma_dest; | 48 | dma_addr_t dma_dest; |
| 52 | 49 | ||
| 53 | /* map the dest bidirectional in case it is re-used as a source */ | 50 | /* map the dest bidirectional in case it is re-used as a source */ |
| 54 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); | 51 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); |
| 55 | for (i = 0; i < src_cnt; i++) { | 52 | for (i = 0; i < src_cnt; i++) { |
| 56 | /* only map the dest once */ | 53 | /* only map the dest once */ |
| 54 | if (!src_list[i]) | ||
| 55 | continue; | ||
| 57 | if (unlikely(src_list[i] == dest)) { | 56 | if (unlikely(src_list[i] == dest)) { |
| 58 | dma_src[i] = dma_dest; | 57 | dma_src[xor_src_cnt++] = dma_dest; |
| 59 | continue; | 58 | continue; |
| 60 | } | 59 | } |
| 61 | dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, | 60 | dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, |
| 62 | len, DMA_TO_DEVICE); | 61 | len, DMA_TO_DEVICE); |
| 63 | } | 62 | } |
| 63 | src_cnt = xor_src_cnt; | ||
| 64 | 64 | ||
| 65 | while (src_cnt) { | 65 | while (src_cnt) { |
| 66 | async_flags = flags; | 66 | submit->flags = flags_orig; |
| 67 | dma_flags = 0; | 67 | dma_flags = 0; |
| 68 | xor_src_cnt = min(src_cnt, dma->max_xor); | 68 | xor_src_cnt = min(src_cnt, (int)dma->max_xor); |
| 69 | /* if we are submitting additional xors, leave the chain open, | 69 | /* if we are submitting additional xors, leave the chain open, |
| 70 | * clear the callback parameters, and leave the destination | 70 | * clear the callback parameters, and leave the destination |
| 71 | * buffer mapped | 71 | * buffer mapped |
| 72 | */ | 72 | */ |
| 73 | if (src_cnt > xor_src_cnt) { | 73 | if (src_cnt > xor_src_cnt) { |
| 74 | async_flags &= ~ASYNC_TX_ACK; | 74 | submit->flags &= ~ASYNC_TX_ACK; |
| 75 | submit->flags |= ASYNC_TX_FENCE; | ||
| 75 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; | 76 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; |
| 76 | _cb_fn = NULL; | 77 | submit->cb_fn = NULL; |
| 77 | _cb_param = NULL; | 78 | submit->cb_param = NULL; |
| 78 | } else { | 79 | } else { |
| 79 | _cb_fn = cb_fn; | 80 | submit->cb_fn = cb_fn_orig; |
| 80 | _cb_param = cb_param; | 81 | submit->cb_param = cb_param_orig; |
| 81 | } | 82 | } |
| 82 | if (_cb_fn) | 83 | if (submit->cb_fn) |
| 83 | dma_flags |= DMA_PREP_INTERRUPT; | 84 | dma_flags |= DMA_PREP_INTERRUPT; |
| 84 | 85 | if (submit->flags & ASYNC_TX_FENCE) | |
| 86 | dma_flags |= DMA_PREP_FENCE; | ||
| 85 | /* Since we have clobbered the src_list we are committed | 87 | /* Since we have clobbered the src_list we are committed |
| 86 | * to doing this asynchronously. Drivers force forward progress | 88 | * to doing this asynchronously. Drivers force forward progress |
| 87 | * in case they can not provide a descriptor | 89 | * in case they can not provide a descriptor |
| @@ -90,7 +92,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
| 90 | xor_src_cnt, len, dma_flags); | 92 | xor_src_cnt, len, dma_flags); |
| 91 | 93 | ||
| 92 | if (unlikely(!tx)) | 94 | if (unlikely(!tx)) |
| 93 | async_tx_quiesce(&depend_tx); | 95 | async_tx_quiesce(&submit->depend_tx); |
| 94 | 96 | ||
| 95 | /* spin wait for the preceding transactions to complete */ | 97 | /* spin wait for the preceding transactions to complete */ |
| 96 | while (unlikely(!tx)) { | 98 | while (unlikely(!tx)) { |
| @@ -101,11 +103,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
| 101 | dma_flags); | 103 | dma_flags); |
| 102 | } | 104 | } |
| 103 | 105 | ||
| 104 | async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, | 106 | async_tx_submit(chan, tx, submit); |
| 105 | _cb_param); | 107 | submit->depend_tx = tx; |
| 106 | |||
| 107 | depend_tx = tx; | ||
| 108 | flags |= ASYNC_TX_DEP_ACK; | ||
| 109 | 108 | ||
| 110 | if (src_cnt > xor_src_cnt) { | 109 | if (src_cnt > xor_src_cnt) { |
| 111 | /* drop completed sources */ | 110 | /* drop completed sources */ |
| @@ -124,23 +123,28 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
| 124 | 123 | ||
| 125 | static void | 124 | static void |
| 126 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | 125 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, |
| 127 | int src_cnt, size_t len, enum async_tx_flags flags, | 126 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
| 128 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 129 | { | 127 | { |
| 130 | int i; | 128 | int i; |
| 131 | int xor_src_cnt; | 129 | int xor_src_cnt = 0; |
| 132 | int src_off = 0; | 130 | int src_off = 0; |
| 133 | void *dest_buf; | 131 | void *dest_buf; |
| 134 | void **srcs = (void **) src_list; | 132 | void **srcs; |
| 135 | 133 | ||
| 136 | /* reuse the 'src_list' array to convert to buffer pointers */ | 134 | if (submit->scribble) |
| 137 | for (i = 0; i < src_cnt; i++) | 135 | srcs = submit->scribble; |
| 138 | srcs[i] = page_address(src_list[i]) + offset; | 136 | else |
| 137 | srcs = (void **) src_list; | ||
| 139 | 138 | ||
| 139 | /* convert to buffer pointers */ | ||
| 140 | for (i = 0; i < src_cnt; i++) | ||
| 141 | if (src_list[i]) | ||
| 142 | srcs[xor_src_cnt++] = page_address(src_list[i]) + offset; | ||
| 143 | src_cnt = xor_src_cnt; | ||
| 140 | /* set destination address */ | 144 | /* set destination address */ |
| 141 | dest_buf = page_address(dest) + offset; | 145 | dest_buf = page_address(dest) + offset; |
| 142 | 146 | ||
| 143 | if (flags & ASYNC_TX_XOR_ZERO_DST) | 147 | if (submit->flags & ASYNC_TX_XOR_ZERO_DST) |
| 144 | memset(dest_buf, 0, len); | 148 | memset(dest_buf, 0, len); |
| 145 | 149 | ||
| 146 | while (src_cnt > 0) { | 150 | while (src_cnt > 0) { |
| @@ -153,61 +157,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | |||
| 153 | src_off += xor_src_cnt; | 157 | src_off += xor_src_cnt; |
| 154 | } | 158 | } |
| 155 | 159 | ||
| 156 | async_tx_sync_epilog(cb_fn, cb_param); | 160 | async_tx_sync_epilog(submit); |
| 157 | } | 161 | } |
| 158 | 162 | ||
| 159 | /** | 163 | /** |
| 160 | * async_xor - attempt to xor a set of blocks with a dma engine. | 164 | * async_xor - attempt to xor a set of blocks with a dma engine. |
| 161 | * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST | ||
| 162 | * flag must be set to not include dest data in the calculation. The | ||
| 163 | * assumption with dma engines is that they only use the destination | ||
| 164 | * buffer as a source when it is explicitly specified in the source list. | ||
| 165 | * @dest: destination page | 165 | * @dest: destination page |
| 166 | * @src_list: array of source pages (if the dest is also a source it must be | 166 | * @src_list: array of source pages |
| 167 | * at index zero). The contents of this array may be overwritten. | 167 | * @offset: common src/dst offset to start transaction |
| 168 | * @offset: offset in pages to start transaction | ||
| 169 | * @src_cnt: number of source pages | 168 | * @src_cnt: number of source pages |
| 170 | * @len: length in bytes | 169 | * @len: length in bytes |
| 171 | * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, | 170 | * @submit: submission / completion modifiers |
| 172 | * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 171 | * |
| 173 | * @depend_tx: xor depends on the result of this transaction. | 172 | * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST |
| 174 | * @cb_fn: function to call when the xor completes | 173 | * |
| 175 | * @cb_param: parameter to pass to the callback routine | 174 | * xor_blocks always uses the dest as a source so the |
| 175 | * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in | ||
| 176 | * the calculation. The assumption with dma engines is that they only | ||
| 177 | * use the destination buffer as a source when it is explicitly specified | ||
| 178 | * in the source list. | ||
| 179 | * | ||
| 180 | * src_list note: if the dest is also a source it must be at index zero. | ||
| 181 | * The contents of this array will be overwritten if a scribble region | ||
| 182 | * is not specified. | ||
| 176 | */ | 183 | */ |
| 177 | struct dma_async_tx_descriptor * | 184 | struct dma_async_tx_descriptor * |
| 178 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, | 185 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, |
| 179 | int src_cnt, size_t len, enum async_tx_flags flags, | 186 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
| 180 | struct dma_async_tx_descriptor *depend_tx, | ||
| 181 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 182 | { | 187 | { |
| 183 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, | 188 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, |
| 184 | &dest, 1, src_list, | 189 | &dest, 1, src_list, |
| 185 | src_cnt, len); | 190 | src_cnt, len); |
| 191 | dma_addr_t *dma_src = NULL; | ||
| 192 | |||
| 186 | BUG_ON(src_cnt <= 1); | 193 | BUG_ON(src_cnt <= 1); |
| 187 | 194 | ||
| 188 | if (chan) { | 195 | if (submit->scribble) |
| 196 | dma_src = submit->scribble; | ||
| 197 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
| 198 | dma_src = (dma_addr_t *) src_list; | ||
| 199 | |||
| 200 | if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { | ||
| 189 | /* run the xor asynchronously */ | 201 | /* run the xor asynchronously */ |
| 190 | pr_debug("%s (async): len: %zu\n", __func__, len); | 202 | pr_debug("%s (async): len: %zu\n", __func__, len); |
| 191 | 203 | ||
| 192 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, | 204 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, |
| 193 | flags, depend_tx, cb_fn, cb_param); | 205 | dma_src, submit); |
| 194 | } else { | 206 | } else { |
| 195 | /* run the xor synchronously */ | 207 | /* run the xor synchronously */ |
| 196 | pr_debug("%s (sync): len: %zu\n", __func__, len); | 208 | pr_debug("%s (sync): len: %zu\n", __func__, len); |
| 209 | WARN_ONCE(chan, "%s: no space for dma address conversion\n", | ||
| 210 | __func__); | ||
| 197 | 211 | ||
| 198 | /* in the sync case the dest is an implied source | 212 | /* in the sync case the dest is an implied source |
| 199 | * (assumes the dest is the first source) | 213 | * (assumes the dest is the first source) |
| 200 | */ | 214 | */ |
| 201 | if (flags & ASYNC_TX_XOR_DROP_DST) { | 215 | if (submit->flags & ASYNC_TX_XOR_DROP_DST) { |
| 202 | src_cnt--; | 216 | src_cnt--; |
| 203 | src_list++; | 217 | src_list++; |
| 204 | } | 218 | } |
| 205 | 219 | ||
| 206 | /* wait for any prerequisite operations */ | 220 | /* wait for any prerequisite operations */ |
| 207 | async_tx_quiesce(&depend_tx); | 221 | async_tx_quiesce(&submit->depend_tx); |
| 208 | 222 | ||
| 209 | do_sync_xor(dest, src_list, offset, src_cnt, len, | 223 | do_sync_xor(dest, src_list, offset, src_cnt, len, submit); |
| 210 | flags, cb_fn, cb_param); | ||
| 211 | 224 | ||
| 212 | return NULL; | 225 | return NULL; |
| 213 | } | 226 | } |
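As a usage illustration (not part of the patch), a caller under the converted interface might issue an xor as in the sketch below; dest, src_list, src_cnt and the addr_conv scribble array are placeholders, and supplying a scribble region is what keeps src_list from being clobbered on the dma path. The flag choice and the quiesce-to-wait pattern mirror the sync fallback of async_xor_val further down:

	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* zero the destination, then xor src_cnt pages into it */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
			  addr_conv);
	tx = async_xor(dest, src_list, 0, src_cnt, PAGE_SIZE, &submit);
	async_tx_quiesce(&tx);		/* wait for (and ack) the xor */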
| @@ -221,105 +234,104 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) | |||
| 221 | memcmp(a, a + 4, len - 4) == 0); | 234 | memcmp(a, a + 4, len - 4) == 0); |
| 222 | } | 235 | } |
| 223 | 236 | ||
| 237 | static inline struct dma_chan * | ||
| 238 | xor_val_chan(struct async_submit_ctl *submit, struct page *dest, | ||
| 239 | struct page **src_list, int src_cnt, size_t len) | ||
| 240 | { | ||
| 241 | #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | ||
| 242 | return NULL; | ||
| 243 | #endif | ||
| 244 | return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list, | ||
| 245 | src_cnt, len); | ||
| 246 | } | ||
| 247 | |||
| 224 | /** | 248 | /** |
| 225 | * async_xor_zero_sum - attempt a xor parity check with a dma engine. | 249 | * async_xor_val - attempt a xor parity check with a dma engine. |
| 226 | * @dest: destination page used if the xor is performed synchronously | 250 | * @dest: destination page used if the xor is performed synchronously |
| 227 | * @src_list: array of source pages. The dest page must be listed as a source | 251 | * @src_list: array of source pages |
| 228 | * at index zero. The contents of this array may be overwritten. | ||
| 229 | * @offset: offset in pages to start transaction | 252 | * @offset: offset in pages to start transaction |
| 230 | * @src_cnt: number of source pages | 253 | * @src_cnt: number of source pages |
| 231 | * @len: length in bytes | 254 | * @len: length in bytes |
| 232 | * @result: 0 if sum == 0 else non-zero | 255 | * @result: 0 if sum == 0 else non-zero |
| 233 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 256 | * @submit: submission / completion modifiers |
| 234 | * @depend_tx: xor depends on the result of this transaction. | 257 | * |
| 235 | * @cb_fn: function to call when the xor completes | 258 | * honored flags: ASYNC_TX_ACK |
| 236 | * @cb_param: parameter to pass to the callback routine | 259 | * |
| 260 | * src_list note: if the dest is also a source it must be at index zero. | ||
| 261 | * The contents of this array will be overwritten if a scribble region | ||
| 262 | * is not specified. | ||
| 237 | */ | 263 | */ |
| 238 | struct dma_async_tx_descriptor * | 264 | struct dma_async_tx_descriptor * |
| 239 | async_xor_zero_sum(struct page *dest, struct page **src_list, | 265 | async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, |
| 240 | unsigned int offset, int src_cnt, size_t len, | 266 | int src_cnt, size_t len, enum sum_check_flags *result, |
| 241 | u32 *result, enum async_tx_flags flags, | 267 | struct async_submit_ctl *submit) |
| 242 | struct dma_async_tx_descriptor *depend_tx, | ||
| 243 | dma_async_tx_callback cb_fn, void *cb_param) | ||
| 244 | { | 268 | { |
| 245 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM, | 269 | struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); |
| 246 | &dest, 1, src_list, | ||
| 247 | src_cnt, len); | ||
| 248 | struct dma_device *device = chan ? chan->device : NULL; | 270 | struct dma_device *device = chan ? chan->device : NULL; |
| 249 | struct dma_async_tx_descriptor *tx = NULL; | 271 | struct dma_async_tx_descriptor *tx = NULL; |
| 272 | dma_addr_t *dma_src = NULL; | ||
| 250 | 273 | ||
| 251 | BUG_ON(src_cnt <= 1); | 274 | BUG_ON(src_cnt <= 1); |
| 252 | 275 | ||
| 253 | if (device && src_cnt <= device->max_xor) { | 276 | if (submit->scribble) |
| 254 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | 277 | dma_src = submit->scribble; |
| 255 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 278 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) |
| 279 | dma_src = (dma_addr_t *) src_list; | ||
| 280 | |||
| 281 | if (dma_src && device && src_cnt <= device->max_xor && | ||
| 282 | is_dma_xor_aligned(device, offset, 0, len)) { | ||
| 283 | unsigned long dma_prep_flags = 0; | ||
| 256 | int i; | 284 | int i; |
| 257 | 285 | ||
| 258 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 286 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
| 259 | 287 | ||
| 288 | if (submit->cb_fn) | ||
| 289 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
| 290 | if (submit->flags & ASYNC_TX_FENCE) | ||
| 291 | dma_prep_flags |= DMA_PREP_FENCE; | ||
| 260 | for (i = 0; i < src_cnt; i++) | 292 | for (i = 0; i < src_cnt; i++) |
| 261 | dma_src[i] = dma_map_page(device->dev, src_list[i], | 293 | dma_src[i] = dma_map_page(device->dev, src_list[i], |
| 262 | offset, len, DMA_TO_DEVICE); | 294 | offset, len, DMA_TO_DEVICE); |
| 263 | 295 | ||
| 264 | tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, | 296 | tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, |
| 265 | len, result, | 297 | len, result, |
| 266 | dma_prep_flags); | 298 | dma_prep_flags); |
| 267 | if (unlikely(!tx)) { | 299 | if (unlikely(!tx)) { |
| 268 | async_tx_quiesce(&depend_tx); | 300 | async_tx_quiesce(&submit->depend_tx); |
| 269 | 301 | ||
| 270 | while (!tx) { | 302 | while (!tx) { |
| 271 | dma_async_issue_pending(chan); | 303 | dma_async_issue_pending(chan); |
| 272 | tx = device->device_prep_dma_zero_sum(chan, | 304 | tx = device->device_prep_dma_xor_val(chan, |
| 273 | dma_src, src_cnt, len, result, | 305 | dma_src, src_cnt, len, result, |
| 274 | dma_prep_flags); | 306 | dma_prep_flags); |
| 275 | } | 307 | } |
| 276 | } | 308 | } |
| 277 | 309 | ||
| 278 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 310 | async_tx_submit(chan, tx, submit); |
| 279 | } else { | 311 | } else { |
| 280 | unsigned long xor_flags = flags; | 312 | enum async_tx_flags flags_orig = submit->flags; |
| 281 | 313 | ||
| 282 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 314 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
| 315 | WARN_ONCE(device && src_cnt <= device->max_xor, | ||
| 316 | "%s: no space for dma address conversion\n", | ||
| 317 | __func__); | ||
| 283 | 318 | ||
| 284 | xor_flags |= ASYNC_TX_XOR_DROP_DST; | 319 | submit->flags |= ASYNC_TX_XOR_DROP_DST; |
| 285 | xor_flags &= ~ASYNC_TX_ACK; | 320 | submit->flags &= ~ASYNC_TX_ACK; |
| 286 | 321 | ||
| 287 | tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, | 322 | tx = async_xor(dest, src_list, offset, src_cnt, len, submit); |
| 288 | depend_tx, NULL, NULL); | ||
| 289 | 323 | ||
| 290 | async_tx_quiesce(&tx); | 324 | async_tx_quiesce(&tx); |
| 291 | 325 | ||
| 292 | *result = page_is_zero(dest, offset, len) ? 0 : 1; | 326 | *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P; |
| 293 | 327 | ||
| 294 | async_tx_sync_epilog(cb_fn, cb_param); | 328 | async_tx_sync_epilog(submit); |
| 329 | submit->flags = flags_orig; | ||
| 295 | } | 330 | } |
| 296 | 331 | ||
| 297 | return tx; | 332 | return tx; |
| 298 | } | 333 | } |
| 299 | EXPORT_SYMBOL_GPL(async_xor_zero_sum); | 334 | EXPORT_SYMBOL_GPL(async_xor_val); |
| 300 | |||
| 301 | static int __init async_xor_init(void) | ||
| 302 | { | ||
| 303 | #ifdef CONFIG_ASYNC_TX_DMA | ||
| 304 | /* To conserve stack space the input src_list (array of page pointers) | ||
| 305 | * is reused to hold the array of dma addresses passed to the driver. | ||
| 306 | * This conversion is only possible when dma_addr_t is less than the | ||
| 307 | * the size of a pointer. HIGHMEM64G is known to violate this | ||
| 308 | * assumption. | ||
| 309 | */ | ||
| 310 | BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *)); | ||
| 311 | #endif | ||
| 312 | |||
| 313 | return 0; | ||
| 314 | } | ||
| 315 | |||
| 316 | static void __exit async_xor_exit(void) | ||
| 317 | { | ||
| 318 | do { } while (0); | ||
| 319 | } | ||
| 320 | |||
| 321 | module_init(async_xor_init); | ||
| 322 | module_exit(async_xor_exit); | ||
| 323 | 335 | ||
| 324 | MODULE_AUTHOR("Intel Corporation"); | 336 | MODULE_AUTHOR("Intel Corporation"); |
| 325 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); | 337 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); |
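A parity check through the renamed async_xor_val() interface might look like the sketch below (again illustrative, with placeholder buffers); the outcome is reported through the SUM_CHECK_P bit of enum sum_check_flags, matching the sync fallback above:

	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	enum sum_check_flags result = 0;

	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
	tx = async_xor_val(dest, src_list, 0, src_cnt, PAGE_SIZE,
			   &result, &submit);
	async_tx_quiesce(&tx);		/* wait for the check to finish */
	if (result & (1 << SUM_CHECK_P))
		pr_debug("xor parity mismatch\n");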
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c new file mode 100644 index 000000000000..3ec27c7e62ea --- /dev/null +++ b/crypto/async_tx/raid6test.c | |||
| @@ -0,0 +1,240 @@ | |||
| 1 | /* | ||
| 2 | * asynchronous raid6 recovery self test | ||
| 3 | * Copyright (c) 2009, Intel Corporation. | ||
| 4 | * | ||
| 5 | * based on drivers/md/raid6test/test.c: | ||
| 6 | * Copyright 2002-2007 H. Peter Anvin | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License along with | ||
| 18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | #include <linux/async_tx.h> | ||
| 23 | #include <linux/random.h> | ||
| 24 | |||
| 25 | #undef pr | ||
| 26 | #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) | ||
| 27 | |||
| 28 | #define NDISKS 16 /* Including P and Q */ | ||
| 29 | |||
| 30 | static struct page *dataptrs[NDISKS]; | ||
| 31 | static addr_conv_t addr_conv[NDISKS]; | ||
| 32 | static struct page *data[NDISKS+3]; | ||
| 33 | static struct page *spare; | ||
| 34 | static struct page *recovi; | ||
| 35 | static struct page *recovj; | ||
| 36 | |||
| 37 | static void callback(void *param) | ||
| 38 | { | ||
| 39 | struct completion *cmp = param; | ||
| 40 | |||
| 41 | complete(cmp); | ||
| 42 | } | ||
| 43 | |||
| 44 | static void makedata(int disks) | ||
| 45 | { | ||
| 46 | int i, j; | ||
| 47 | |||
| 48 | for (i = 0; i < disks; i++) { | ||
| 49 | for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) { | ||
| 50 | u32 *p = page_address(data[i]) + j; | ||
| 51 | |||
| 52 | *p = random32(); | ||
| 53 | } | ||
| 54 | |||
| 55 | dataptrs[i] = data[i]; | ||
| 56 | } | ||
| 57 | } | ||
| 58 | |||
| 59 | static char disk_type(int d, int disks) | ||
| 60 | { | ||
| 61 | if (d == disks - 2) | ||
| 62 | return 'P'; | ||
| 63 | else if (d == disks - 1) | ||
| 64 | return 'Q'; | ||
| 65 | else | ||
| 66 | return 'D'; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* Recover two failed blocks. */ | ||
| 70 | static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs) | ||
| 71 | { | ||
| 72 | struct async_submit_ctl submit; | ||
| 73 | struct completion cmp; | ||
| 74 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 75 | enum sum_check_flags result = ~0; | ||
| 76 | |||
| 77 | if (faila > failb) | ||
| 78 | swap(faila, failb); | ||
| 79 | |||
| 80 | if (failb == disks-1) { | ||
| 81 | if (faila == disks-2) { | ||
| 82 | /* P+Q failure. Just rebuild the syndrome. */ | ||
| 83 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
| 84 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
| 85 | } else { | ||
| 86 | struct page *blocks[disks]; | ||
| 87 | struct page *dest; | ||
| 88 | int count = 0; | ||
| 89 | int i; | ||
| 90 | |||
| 91 | /* data+Q failure. Reconstruct data from P, | ||
| 92 | * then rebuild syndrome | ||
| 93 | */ | ||
| 94 | for (i = disks; i-- ; ) { | ||
| 95 | if (i == faila || i == failb) | ||
| 96 | continue; | ||
| 97 | blocks[count++] = ptrs[i]; | ||
| 98 | } | ||
| 99 | dest = ptrs[faila]; | ||
| 100 | init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
| 101 | NULL, NULL, addr_conv); | ||
| 102 | tx = async_xor(dest, blocks, 0, count, bytes, &submit); | ||
| 103 | |||
| 104 | init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); | ||
| 105 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
| 106 | } | ||
| 107 | } else { | ||
| 108 | if (failb == disks-2) { | ||
| 109 | /* data+P failure. */ | ||
| 110 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
| 111 | tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); | ||
| 112 | } else { | ||
| 113 | /* data+data failure. */ | ||
| 114 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
| 115 | tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); | ||
| 116 | } | ||
| 117 | } | ||
| 118 | init_completion(&cmp); | ||
| 119 | init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); | ||
| 120 | tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); | ||
| 121 | async_tx_issue_pending(tx); | ||
| 122 | |||
| 123 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) | ||
| 124 | pr("%s: timeout! (faila: %d failb: %d disks: %d)\n", | ||
| 125 | __func__, faila, failb, disks); | ||
| 126 | |||
| 127 | if (result != 0) | ||
| 128 | pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n", | ||
| 129 | __func__, faila, failb, result); | ||
| 130 | } | ||
| 131 | |||
| 132 | static int test_disks(int i, int j, int disks) | ||
| 133 | { | ||
| 134 | int erra, errb; | ||
| 135 | |||
| 136 | memset(page_address(recovi), 0xf0, PAGE_SIZE); | ||
| 137 | memset(page_address(recovj), 0xba, PAGE_SIZE); | ||
| 138 | |||
| 139 | dataptrs[i] = recovi; | ||
| 140 | dataptrs[j] = recovj; | ||
| 141 | |||
| 142 | raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs); | ||
| 143 | |||
| 144 | erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); | ||
| 145 | errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); | ||
| 146 | |||
| 147 | pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n", | ||
| 148 | __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks), | ||
| 149 | (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB"); | ||
| 150 | |||
| 151 | dataptrs[i] = data[i]; | ||
| 152 | dataptrs[j] = data[j]; | ||
| 153 | |||
| 154 | return erra || errb; | ||
| 155 | } | ||
| 156 | |||
| 157 | static int test(int disks, int *tests) | ||
| 158 | { | ||
| 159 | struct dma_async_tx_descriptor *tx; | ||
| 160 | struct async_submit_ctl submit; | ||
| 161 | struct completion cmp; | ||
| 162 | int err = 0; | ||
| 163 | int i, j; | ||
| 164 | |||
| 165 | recovi = data[disks]; | ||
| 166 | recovj = data[disks+1]; | ||
| 167 | spare = data[disks+2]; | ||
| 168 | |||
| 169 | makedata(disks); | ||
| 170 | |||
| 171 | /* Nuke syndromes */ | ||
| 172 | memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); | ||
| 173 | memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); | ||
| 174 | |||
| 175 | /* Generate assumed good syndrome */ | ||
| 176 | init_completion(&cmp); | ||
| 177 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv); | ||
| 178 | tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); | ||
| 179 | async_tx_issue_pending(tx); | ||
| 180 | |||
| 181 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) { | ||
| 182 | pr("error: initial gen_syndrome(%d) timed out\n", disks); | ||
| 183 | return 1; | ||
| 184 | } | ||
| 185 | |||
| 186 | pr("testing the %d-disk case...\n", disks); | ||
| 187 | for (i = 0; i < disks-1; i++) | ||
| 188 | for (j = i+1; j < disks; j++) { | ||
| 189 | (*tests)++; | ||
| 190 | err += test_disks(i, j, disks); | ||
| 191 | } | ||
| 192 | |||
| 193 | return err; | ||
| 194 | } | ||
| 195 | |||
| 196 | |||
| 197 | static int raid6_test(void) | ||
| 198 | { | ||
| 199 | int err = 0; | ||
| 200 | int tests = 0; | ||
| 201 | int i; | ||
| 202 | |||
| 203 | for (i = 0; i < NDISKS+3; i++) { | ||
| 204 | data[i] = alloc_page(GFP_KERNEL); | ||
| 205 | if (!data[i]) { | ||
| 206 | while (i--) | ||
| 207 | put_page(data[i]); | ||
| 208 | return -ENOMEM; | ||
| 209 | } | ||
| 210 | } | ||
| 211 | |||
| 212 | /* the 4-disk and 5-disk cases are special for the recovery code */ | ||
| 213 | if (NDISKS > 4) | ||
| 214 | err += test(4, &tests); | ||
| 215 | if (NDISKS > 5) | ||
| 216 | err += test(5, &tests); | ||
| 217 | err += test(NDISKS, &tests); | ||
| 218 | |||
| 219 | pr("\n"); | ||
| 220 | pr("complete (%d tests, %d failure%s)\n", | ||
| 221 | tests, err, err == 1 ? "" : "s"); | ||
| 222 | |||
| 223 | for (i = 0; i < NDISKS+3; i++) | ||
| 224 | put_page(data[i]); | ||
| 225 | |||
| 226 | return 0; | ||
| 227 | } | ||
| 228 | |||
| 229 | static void raid6_test_exit(void) | ||
| 230 | { | ||
| 231 | } | ||
| 232 | |||
| 233 | /* when compiled-in, wait for drivers to load first (assumes dma drivers | ||
| 234 | * are also compiled-in) | ||
| 235 | */ | ||
| 236 | late_initcall(raid6_test); | ||
| 237 | module_exit(raid6_test_exit); | ||
| 238 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
| 239 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests"); | ||
| 240 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/authenc.c b/crypto/authenc.c index 5793b64c81a8..4d6f49a5daeb 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -23,24 +23,36 @@ | |||
| 23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
| 25 | 25 | ||
| 26 | typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags); | ||
| 27 | |||
| 26 | struct authenc_instance_ctx { | 28 | struct authenc_instance_ctx { |
| 27 | struct crypto_spawn auth; | 29 | struct crypto_ahash_spawn auth; |
| 28 | struct crypto_skcipher_spawn enc; | 30 | struct crypto_skcipher_spawn enc; |
| 29 | }; | 31 | }; |
| 30 | 32 | ||
| 31 | struct crypto_authenc_ctx { | 33 | struct crypto_authenc_ctx { |
| 32 | spinlock_t auth_lock; | 34 | unsigned int reqoff; |
| 33 | struct crypto_hash *auth; | 35 | struct crypto_ahash *auth; |
| 34 | struct crypto_ablkcipher *enc; | 36 | struct crypto_ablkcipher *enc; |
| 35 | }; | 37 | }; |
| 36 | 38 | ||
| 39 | struct authenc_request_ctx { | ||
| 40 | unsigned int cryptlen; | ||
| 41 | struct scatterlist *sg; | ||
| 42 | struct scatterlist asg[2]; | ||
| 43 | struct scatterlist cipher[2]; | ||
| 44 | crypto_completion_t complete; | ||
| 45 | crypto_completion_t update_complete; | ||
| 46 | char tail[]; | ||
| 47 | }; | ||
| 48 | |||
| 37 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 49 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
| 38 | unsigned int keylen) | 50 | unsigned int keylen) |
| 39 | { | 51 | { |
| 40 | unsigned int authkeylen; | 52 | unsigned int authkeylen; |
| 41 | unsigned int enckeylen; | 53 | unsigned int enckeylen; |
| 42 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 54 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
| 43 | struct crypto_hash *auth = ctx->auth; | 55 | struct crypto_ahash *auth = ctx->auth; |
| 44 | struct crypto_ablkcipher *enc = ctx->enc; | 56 | struct crypto_ablkcipher *enc = ctx->enc; |
| 45 | struct rtattr *rta = (void *)key; | 57 | struct rtattr *rta = (void *)key; |
| 46 | struct crypto_authenc_key_param *param; | 58 | struct crypto_authenc_key_param *param; |
| @@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
| 64 | 76 | ||
| 65 | authkeylen = keylen - enckeylen; | 77 | authkeylen = keylen - enckeylen; |
| 66 | 78 | ||
| 67 | crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 79 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
| 68 | crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & | 80 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & |
| 69 | CRYPTO_TFM_REQ_MASK); | 81 | CRYPTO_TFM_REQ_MASK); |
| 70 | err = crypto_hash_setkey(auth, key, authkeylen); | 82 | err = crypto_ahash_setkey(auth, key, authkeylen); |
| 71 | crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & | 83 | crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & |
| 72 | CRYPTO_TFM_RES_MASK); | 84 | CRYPTO_TFM_RES_MASK); |
| 73 | 85 | ||
| 74 | if (err) | 86 | if (err) |
| @@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, | |||
| 103 | sg_mark_end(head); | 115 | sg_mark_end(head); |
| 104 | } | 116 | } |
| 105 | 117 | ||
| 106 | static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, | 118 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, |
| 107 | struct scatterlist *cipher, | 119 | int err) |
| 108 | unsigned int cryptlen) | 120 | { |
| 121 | struct aead_request *req = areq->data; | ||
| 122 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 123 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 124 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 125 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 126 | |||
| 127 | if (err) | ||
| 128 | goto out; | ||
| 129 | |||
| 130 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
| 131 | areq_ctx->cryptlen); | ||
| 132 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 133 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 134 | areq_ctx->complete, req); | ||
| 135 | |||
| 136 | err = crypto_ahash_finup(ahreq); | ||
| 137 | if (err) | ||
| 138 | goto out; | ||
| 139 | |||
| 140 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
| 141 | areq_ctx->cryptlen, | ||
| 142 | crypto_aead_authsize(authenc), 1); | ||
| 143 | |||
| 144 | out: | ||
| 145 | aead_request_complete(req, err); | ||
| 146 | } | ||
| 147 | |||
| 148 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) | ||
| 149 | { | ||
| 150 | struct aead_request *req = areq->data; | ||
| 151 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 152 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 153 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 154 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 155 | |||
| 156 | if (err) | ||
| 157 | goto out; | ||
| 158 | |||
| 159 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
| 160 | areq_ctx->cryptlen, | ||
| 161 | crypto_aead_authsize(authenc), 1); | ||
| 162 | |||
| 163 | out: | ||
| 164 | aead_request_complete(req, err); | ||
| 165 | } | ||
| 166 | |||
| 167 | static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | ||
| 168 | int err) | ||
| 109 | { | 169 | { |
| 170 | u8 *ihash; | ||
| 171 | unsigned int authsize; | ||
| 172 | struct ablkcipher_request *abreq; | ||
| 173 | struct aead_request *req = areq->data; | ||
| 110 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 174 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 111 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 175 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
| 112 | struct crypto_hash *auth = ctx->auth; | 176 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
| 113 | struct hash_desc desc = { | 177 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
| 114 | .tfm = auth, | 178 | |
| 115 | .flags = aead_request_flags(req) & flags, | 179 | if (err) |
| 116 | }; | 180 | goto out; |
| 117 | u8 *hash = aead_request_ctx(req); | 181 | |
| 182 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
| 183 | areq_ctx->cryptlen); | ||
| 184 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 185 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 186 | areq_ctx->complete, req); | ||
| 187 | |||
| 188 | err = crypto_ahash_finup(ahreq); | ||
| 189 | if (err) | ||
| 190 | goto out; | ||
| 191 | |||
| 192 | authsize = crypto_aead_authsize(authenc); | ||
| 193 | ihash = ahreq->result + authsize; | ||
| 194 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 195 | authsize, 0); | ||
| 196 | |||
| 197 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; | ||
| 198 | if (err) | ||
| 199 | goto out; | ||
| 200 | |||
| 201 | abreq = aead_request_ctx(req); | ||
| 202 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 203 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 204 | req->base.complete, req->base.data); | ||
| 205 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 206 | req->cryptlen, req->iv); | ||
| 207 | |||
| 208 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 209 | |||
| 210 | out: | ||
| 211 | aead_request_complete(req, err); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, | ||
| 215 | int err) | ||
| 216 | { | ||
| 217 | u8 *ihash; | ||
| 218 | unsigned int authsize; | ||
| 219 | struct ablkcipher_request *abreq; | ||
| 220 | struct aead_request *req = areq->data; | ||
| 221 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 222 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 223 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 224 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 225 | |||
| 226 | if (err) | ||
| 227 | goto out; | ||
| 228 | |||
| 229 | authsize = crypto_aead_authsize(authenc); | ||
| 230 | ihash = ahreq->result + authsize; | ||
| 231 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 232 | authsize, 0); | ||
| 233 | |||
| 234 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; | ||
| 235 | if (err) | ||
| 236 | goto out; | ||
| 237 | |||
| 238 | abreq = aead_request_ctx(req); | ||
| 239 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 240 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 241 | req->base.complete, req->base.data); | ||
| 242 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 243 | req->cryptlen, req->iv); | ||
| 244 | |||
| 245 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 246 | |||
| 247 | out: | ||
| 248 | aead_request_complete(req, err); | ||
| 249 | } | ||
| 250 | |||
| 251 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) | ||
| 252 | { | ||
| 253 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 254 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 255 | struct crypto_ahash *auth = ctx->auth; | ||
| 256 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 257 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 258 | u8 *hash = areq_ctx->tail; | ||
| 118 | int err; | 259 | int err; |
| 119 | 260 | ||
| 120 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), | 261 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), |
| 121 | crypto_hash_alignmask(auth) + 1); | 262 | crypto_ahash_alignmask(auth) + 1); |
| 263 | |||
| 264 | ahash_request_set_tfm(ahreq, auth); | ||
| 122 | 265 | ||
| 123 | spin_lock_bh(&ctx->auth_lock); | 266 | err = crypto_ahash_init(ahreq); |
| 124 | err = crypto_hash_init(&desc); | ||
| 125 | if (err) | 267 | if (err) |
| 126 | goto auth_unlock; | 268 | return ERR_PTR(err); |
| 269 | |||
| 270 | ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); | ||
| 271 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 272 | areq_ctx->update_complete, req); | ||
| 127 | 273 | ||
| 128 | err = crypto_hash_update(&desc, req->assoc, req->assoclen); | 274 | err = crypto_ahash_update(ahreq); |
| 129 | if (err) | 275 | if (err) |
| 130 | goto auth_unlock; | 276 | return ERR_PTR(err); |
| 277 | |||
| 278 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
| 279 | areq_ctx->cryptlen); | ||
| 280 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 281 | areq_ctx->complete, req); | ||
| 131 | 282 | ||
| 132 | err = crypto_hash_update(&desc, cipher, cryptlen); | 283 | err = crypto_ahash_finup(ahreq); |
| 133 | if (err) | 284 | if (err) |
| 134 | goto auth_unlock; | 285 | return ERR_PTR(err); |
| 135 | 286 | ||
| 136 | err = crypto_hash_final(&desc, hash); | 287 | return hash; |
| 137 | auth_unlock: | 288 | } |
| 138 | spin_unlock_bh(&ctx->auth_lock); | 289 | |
| 290 | static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) | ||
| 291 | { | ||
| 292 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 293 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 294 | struct crypto_ahash *auth = ctx->auth; | ||
| 295 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 296 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 297 | u8 *hash = areq_ctx->tail; | ||
| 298 | int err; | ||
| 139 | 299 | ||
| 300 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | ||
| 301 | crypto_ahash_alignmask(auth) + 1); | ||
| 302 | |||
| 303 | ahash_request_set_tfm(ahreq, auth); | ||
| 304 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
| 305 | areq_ctx->cryptlen); | ||
| 306 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 307 | areq_ctx->complete, req); | ||
| 308 | |||
| 309 | err = crypto_ahash_digest(ahreq); | ||
| 140 | if (err) | 310 | if (err) |
| 141 | return ERR_PTR(err); | 311 | return ERR_PTR(err); |
| 142 | 312 | ||
| @@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
| 147 | unsigned int flags) | 317 | unsigned int flags) |
| 148 | { | 318 | { |
| 149 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 319 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 320 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 150 | struct scatterlist *dst = req->dst; | 321 | struct scatterlist *dst = req->dst; |
| 151 | struct scatterlist cipher[2]; | 322 | struct scatterlist *assoc = req->assoc; |
| 152 | struct page *dstp; | 323 | struct scatterlist *cipher = areq_ctx->cipher; |
| 324 | struct scatterlist *asg = areq_ctx->asg; | ||
| 153 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 325 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
| 154 | unsigned int cryptlen; | 326 | unsigned int cryptlen = req->cryptlen; |
| 327 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
| 328 | struct page *dstp; | ||
| 155 | u8 *vdst; | 329 | u8 *vdst; |
| 156 | u8 *hash; | 330 | u8 *hash; |
| 157 | 331 | ||
| @@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
| 163 | sg_set_buf(cipher, iv, ivsize); | 337 | sg_set_buf(cipher, iv, ivsize); |
| 164 | authenc_chain(cipher, dst, vdst == iv + ivsize); | 338 | authenc_chain(cipher, dst, vdst == iv + ivsize); |
| 165 | dst = cipher; | 339 | dst = cipher; |
| 340 | cryptlen += ivsize; | ||
| 166 | } | 341 | } |
| 167 | 342 | ||
| 168 | cryptlen = req->cryptlen + ivsize; | 343 | if (sg_is_last(assoc)) { |
| 169 | hash = crypto_authenc_hash(req, flags, dst, cryptlen); | 344 | authenc_ahash_fn = crypto_authenc_ahash; |
| 345 | sg_init_table(asg, 2); | ||
| 346 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
| 347 | authenc_chain(asg, dst, 0); | ||
| 348 | dst = asg; | ||
| 349 | cryptlen += req->assoclen; | ||
| 350 | } | ||
| 351 | |||
| 352 | areq_ctx->cryptlen = cryptlen; | ||
| 353 | areq_ctx->sg = dst; | ||
| 354 | |||
| 355 | areq_ctx->complete = authenc_geniv_ahash_done; | ||
| 356 | areq_ctx->update_complete = authenc_geniv_ahash_update_done; | ||
| 357 | |||
| 358 | hash = authenc_ahash_fn(req, flags); | ||
| 170 | if (IS_ERR(hash)) | 359 | if (IS_ERR(hash)) |
| 171 | return PTR_ERR(hash); | 360 | return PTR_ERR(hash); |
| 172 | 361 | ||
| @@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | |||
| 256 | } | 445 | } |
| 257 | 446 | ||
| 258 | static int crypto_authenc_verify(struct aead_request *req, | 447 | static int crypto_authenc_verify(struct aead_request *req, |
| 259 | struct scatterlist *cipher, | 448 | authenc_ahash_t authenc_ahash_fn) |
| 260 | unsigned int cryptlen) | ||
| 261 | { | 449 | { |
| 262 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 450 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 451 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 263 | u8 *ohash; | 452 | u8 *ohash; |
| 264 | u8 *ihash; | 453 | u8 *ihash; |
| 265 | unsigned int authsize; | 454 | unsigned int authsize; |
| 266 | 455 | ||
| 267 | ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, | 456 | areq_ctx->complete = authenc_verify_ahash_done; |
| 268 | cryptlen); | 457 | areq_ctx->update_complete = authenc_verify_ahash_update_done; |
| 458 | |||
| 459 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
| 269 | if (IS_ERR(ohash)) | 460 | if (IS_ERR(ohash)) |
| 270 | return PTR_ERR(ohash); | 461 | return PTR_ERR(ohash); |
| 271 | 462 | ||
| 272 | authsize = crypto_aead_authsize(authenc); | 463 | authsize = crypto_aead_authsize(authenc); |
| 273 | ihash = ohash + authsize; | 464 | ihash = ohash + authsize; |
| 274 | scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); | 465 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 466 | authsize, 0); | ||
| 275 | return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; | 467 | return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; |
| 276 | } | 468 | } |
| 277 | 469 | ||
| @@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
| 279 | unsigned int cryptlen) | 471 | unsigned int cryptlen) |
| 280 | { | 472 | { |
| 281 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 473 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 474 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 282 | struct scatterlist *src = req->src; | 475 | struct scatterlist *src = req->src; |
| 283 | struct scatterlist cipher[2]; | 476 | struct scatterlist *assoc = req->assoc; |
| 284 | struct page *srcp; | 477 | struct scatterlist *cipher = areq_ctx->cipher; |
| 478 | struct scatterlist *asg = areq_ctx->asg; | ||
| 285 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 479 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
| 480 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
| 481 | struct page *srcp; | ||
| 286 | u8 *vsrc; | 482 | u8 *vsrc; |
| 287 | 483 | ||
| 288 | srcp = sg_page(src); | 484 | srcp = sg_page(src); |
| @@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
| 293 | sg_set_buf(cipher, iv, ivsize); | 489 | sg_set_buf(cipher, iv, ivsize); |
| 294 | authenc_chain(cipher, src, vsrc == iv + ivsize); | 490 | authenc_chain(cipher, src, vsrc == iv + ivsize); |
| 295 | src = cipher; | 491 | src = cipher; |
| 492 | cryptlen += ivsize; | ||
| 493 | } | ||
| 494 | |||
| 495 | if (sg_is_last(assoc)) { | ||
| 496 | authenc_ahash_fn = crypto_authenc_ahash; | ||
| 497 | sg_init_table(asg, 2); | ||
| 498 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
| 499 | authenc_chain(asg, src, 0); | ||
| 500 | src = asg; | ||
| 501 | cryptlen += req->assoclen; | ||
| 296 | } | 502 | } |
| 297 | 503 | ||
| 298 | return crypto_authenc_verify(req, src, cryptlen + ivsize); | 504 | areq_ctx->cryptlen = cryptlen; |
| 505 | areq_ctx->sg = src; | ||
| 506 | |||
| 507 | return crypto_authenc_verify(req, authenc_ahash_fn); | ||
| 299 | } | 508 | } |
| 300 | 509 | ||
| 301 | static int crypto_authenc_decrypt(struct aead_request *req) | 510 | static int crypto_authenc_decrypt(struct aead_request *req) |
| @@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req) | |||
| 326 | 535 | ||
| 327 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | 536 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) |
| 328 | { | 537 | { |
| 329 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 538 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
| 330 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); | 539 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); |
| 331 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 540 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
| 332 | struct crypto_hash *auth; | 541 | struct crypto_ahash *auth; |
| 333 | struct crypto_ablkcipher *enc; | 542 | struct crypto_ablkcipher *enc; |
| 334 | int err; | 543 | int err; |
| 335 | 544 | ||
| 336 | auth = crypto_spawn_hash(&ictx->auth); | 545 | auth = crypto_spawn_ahash(&ictx->auth); |
| 337 | if (IS_ERR(auth)) | 546 | if (IS_ERR(auth)) |
| 338 | return PTR_ERR(auth); | 547 | return PTR_ERR(auth); |
| 339 | 548 | ||
| 549 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | ||
| 550 | crypto_ahash_alignmask(auth), | ||
| 551 | crypto_ahash_alignmask(auth) + 1); | ||
| 552 | |||
| 340 | enc = crypto_spawn_skcipher(&ictx->enc); | 553 | enc = crypto_spawn_skcipher(&ictx->enc); |
| 341 | err = PTR_ERR(enc); | 554 | err = PTR_ERR(enc); |
| 342 | if (IS_ERR(enc)) | 555 | if (IS_ERR(enc)) |
| 343 | goto err_free_hash; | 556 | goto err_free_ahash; |
| 344 | 557 | ||
| 345 | ctx->auth = auth; | 558 | ctx->auth = auth; |
| 346 | ctx->enc = enc; | 559 | ctx->enc = enc; |
| 560 | |||
| 347 | tfm->crt_aead.reqsize = max_t(unsigned int, | 561 | tfm->crt_aead.reqsize = max_t(unsigned int, |
| 348 | (crypto_hash_alignmask(auth) & | 562 | crypto_ahash_reqsize(auth) + ctx->reqoff + |
| 349 | ~(crypto_tfm_ctx_alignment() - 1)) + | 563 | sizeof(struct authenc_request_ctx) + |
| 350 | crypto_hash_digestsize(auth) * 2, | 564 | sizeof(struct ahash_request), |
| 351 | sizeof(struct skcipher_givcrypt_request) + | 565 | sizeof(struct skcipher_givcrypt_request) + |
| 352 | crypto_ablkcipher_reqsize(enc) + | 566 | crypto_ablkcipher_reqsize(enc) + |
| 353 | crypto_ablkcipher_ivsize(enc)); | 567 | crypto_ablkcipher_ivsize(enc)); |
| 354 | |||
| 355 | spin_lock_init(&ctx->auth_lock); | ||
| 356 | 568 | ||
| 357 | return 0; | 569 | return 0; |
| 358 | 570 | ||
| 359 | err_free_hash: | 571 | err_free_ahash: |
| 360 | crypto_free_hash(auth); | 572 | crypto_free_ahash(auth); |
| 361 | return err; | 573 | return err; |
| 362 | } | 574 | } |
| 363 | 575 | ||
| @@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) | |||
| 365 | { | 577 | { |
| 366 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 578 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
| 367 | 579 | ||
| 368 | crypto_free_hash(ctx->auth); | 580 | crypto_free_ahash(ctx->auth); |
| 369 | crypto_free_ablkcipher(ctx->enc); | 581 | crypto_free_ablkcipher(ctx->enc); |
| 370 | } | 582 | } |
| 371 | 583 | ||
| @@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 373 | { | 585 | { |
| 374 | struct crypto_attr_type *algt; | 586 | struct crypto_attr_type *algt; |
| 375 | struct crypto_instance *inst; | 587 | struct crypto_instance *inst; |
| 376 | struct crypto_alg *auth; | 588 | struct hash_alg_common *auth; |
| 589 | struct crypto_alg *auth_base; | ||
| 377 | struct crypto_alg *enc; | 590 | struct crypto_alg *enc; |
| 378 | struct authenc_instance_ctx *ctx; | 591 | struct authenc_instance_ctx *ctx; |
| 379 | const char *enc_name; | 592 | const char *enc_name; |
| @@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 387 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 600 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 388 | return ERR_PTR(-EINVAL); | 601 | return ERR_PTR(-EINVAL); |
| 389 | 602 | ||
| 390 | auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 603 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
| 391 | CRYPTO_ALG_TYPE_HASH_MASK); | 604 | CRYPTO_ALG_TYPE_AHASH_MASK); |
| 392 | if (IS_ERR(auth)) | 605 | if (IS_ERR(auth)) |
| 393 | return ERR_PTR(PTR_ERR(auth)); | 606 | return ERR_PTR(PTR_ERR(auth)); |
| 394 | 607 | ||
| 608 | auth_base = &auth->base; | ||
| 609 | |||
| 395 | enc_name = crypto_attr_alg_name(tb[2]); | 610 | enc_name = crypto_attr_alg_name(tb[2]); |
| 396 | err = PTR_ERR(enc_name); | 611 | err = PTR_ERR(enc_name); |
| 397 | if (IS_ERR(enc_name)) | 612 | if (IS_ERR(enc_name)) |
| @@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 404 | 619 | ||
| 405 | ctx = crypto_instance_ctx(inst); | 620 | ctx = crypto_instance_ctx(inst); |
| 406 | 621 | ||
| 407 | err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); | 622 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); |
| 408 | if (err) | 623 | if (err) |
| 409 | goto err_free_inst; | 624 | goto err_free_inst; |
| 410 | 625 | ||
| @@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 419 | 634 | ||
| 420 | err = -ENAMETOOLONG; | 635 | err = -ENAMETOOLONG; |
| 421 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 636 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, |
| 422 | "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= | 637 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
| 423 | CRYPTO_MAX_ALG_NAME) | 638 | CRYPTO_MAX_ALG_NAME) |
| 424 | goto err_drop_enc; | 639 | goto err_drop_enc; |
| 425 | 640 | ||
| 426 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 641 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 427 | "authenc(%s,%s)", auth->cra_driver_name, | 642 | "authenc(%s,%s)", auth_base->cra_driver_name, |
| 428 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 643 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 429 | goto err_drop_enc; | 644 | goto err_drop_enc; |
| 430 | 645 | ||
| 431 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 646 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
| 432 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 647 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; |
| 433 | inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; | 648 | inst->alg.cra_priority = enc->cra_priority * |
| 649 | 10 + auth_base->cra_priority; | ||
| 434 | inst->alg.cra_blocksize = enc->cra_blocksize; | 650 | inst->alg.cra_blocksize = enc->cra_blocksize; |
| 435 | inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; | 651 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; |
| 436 | inst->alg.cra_type = &crypto_aead_type; | 652 | inst->alg.cra_type = &crypto_aead_type; |
| 437 | 653 | ||
| 438 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 654 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; |
| 439 | inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? | 655 | inst->alg.cra_aead.maxauthsize = auth->digestsize; |
| 440 | auth->cra_hash.digestsize : | ||
| 441 | auth->cra_type ? | ||
| 442 | __crypto_shash_alg(auth)->digestsize : | ||
| 443 | auth->cra_digest.dia_digestsize; | ||
| 444 | 656 | ||
| 445 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); | 657 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); |
| 446 | 658 | ||
| @@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 453 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; | 665 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; |
| 454 | 666 | ||
| 455 | out: | 667 | out: |
| 456 | crypto_mod_put(auth); | 668 | crypto_mod_put(auth_base); |
| 457 | return inst; | 669 | return inst; |
| 458 | 670 | ||
| 459 | err_drop_enc: | 671 | err_drop_enc: |
| 460 | crypto_drop_skcipher(&ctx->enc); | 672 | crypto_drop_skcipher(&ctx->enc); |
| 461 | err_drop_auth: | 673 | err_drop_auth: |
| 462 | crypto_drop_spawn(&ctx->auth); | 674 | crypto_drop_ahash(&ctx->auth); |
| 463 | err_free_inst: | 675 | err_free_inst: |
| 464 | kfree(inst); | 676 | kfree(inst); |
| 465 | out_put_auth: | 677 | out_put_auth: |
| @@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst) | |||
| 472 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); | 684 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); |
| 473 | 685 | ||
| 474 | crypto_drop_skcipher(&ctx->enc); | 686 | crypto_drop_skcipher(&ctx->enc); |
| 475 | crypto_drop_spawn(&ctx->auth); | 687 | crypto_drop_ahash(&ctx->auth); |
| 476 | kfree(inst); | 688 | kfree(inst); |
| 477 | } | 689 | } |
| 478 | 690 | ||
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ae5fa99d5d36..35335825a4ef 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
| @@ -39,6 +39,11 @@ struct cryptd_instance_ctx { | |||
| 39 | struct cryptd_queue *queue; | 39 | struct cryptd_queue *queue; |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | struct hashd_instance_ctx { | ||
| 43 | struct crypto_shash_spawn spawn; | ||
| 44 | struct cryptd_queue *queue; | ||
| 45 | }; | ||
| 46 | |||
| 42 | struct cryptd_blkcipher_ctx { | 47 | struct cryptd_blkcipher_ctx { |
| 43 | struct crypto_blkcipher *child; | 48 | struct crypto_blkcipher *child; |
| 44 | }; | 49 | }; |
| @@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx { | |||
| 48 | }; | 53 | }; |
| 49 | 54 | ||
| 50 | struct cryptd_hash_ctx { | 55 | struct cryptd_hash_ctx { |
| 51 | struct crypto_hash *child; | 56 | struct crypto_shash *child; |
| 52 | }; | 57 | }; |
| 53 | 58 | ||
| 54 | struct cryptd_hash_request_ctx { | 59 | struct cryptd_hash_request_ctx { |
| 55 | crypto_completion_t complete; | 60 | crypto_completion_t complete; |
| 61 | struct shash_desc desc; | ||
| 56 | }; | 62 | }; |
| 57 | 63 | ||
| 58 | static void cryptd_queue_worker(struct work_struct *work); | 64 | static void cryptd_queue_worker(struct work_struct *work); |
| @@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | |||
| 249 | crypto_free_blkcipher(ctx->child); | 255 | crypto_free_blkcipher(ctx->child); |
| 250 | } | 256 | } |
| 251 | 257 | ||
| 252 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | 258 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
| 253 | struct cryptd_queue *queue) | 259 | unsigned int tail) |
| 254 | { | 260 | { |
| 261 | char *p; | ||
| 255 | struct crypto_instance *inst; | 262 | struct crypto_instance *inst; |
| 256 | struct cryptd_instance_ctx *ctx; | ||
| 257 | int err; | 263 | int err; |
| 258 | 264 | ||
| 259 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 265 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
| 260 | if (!inst) { | 266 | if (!p) |
| 261 | inst = ERR_PTR(-ENOMEM); | 267 | return ERR_PTR(-ENOMEM); |
| 262 | goto out; | 268 | |
| 263 | } | 269 | inst = (void *)(p + head); |
| 264 | 270 | ||
| 265 | err = -ENAMETOOLONG; | 271 | err = -ENAMETOOLONG; |
| 266 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 272 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 267 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 273 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 268 | goto out_free_inst; | 274 | goto out_free_inst; |
| 269 | 275 | ||
| 270 | ctx = crypto_instance_ctx(inst); | ||
| 271 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
| 272 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
| 273 | if (err) | ||
| 274 | goto out_free_inst; | ||
| 275 | |||
| 276 | ctx->queue = queue; | ||
| 277 | |||
| 278 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 276 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
| 279 | 277 | ||
| 280 | inst->alg.cra_priority = alg->cra_priority + 50; | 278 | inst->alg.cra_priority = alg->cra_priority + 50; |
| @@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | |||
| 282 | inst->alg.cra_alignmask = alg->cra_alignmask; | 280 | inst->alg.cra_alignmask = alg->cra_alignmask; |
| 283 | 281 | ||
| 284 | out: | 282 | out: |
| 285 | return inst; | 283 | return p; |
| 286 | 284 | ||
| 287 | out_free_inst: | 285 | out_free_inst: |
| 288 | kfree(inst); | 286 | kfree(p); |
| 289 | inst = ERR_PTR(err); | 287 | p = ERR_PTR(err); |
| 290 | goto out; | 288 | goto out; |
| 291 | } | 289 | } |
| 292 | 290 | ||
| 293 | static struct crypto_instance *cryptd_alloc_blkcipher( | 291 | static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
| 294 | struct rtattr **tb, struct cryptd_queue *queue) | 292 | struct rtattr **tb, |
| 293 | struct cryptd_queue *queue) | ||
| 295 | { | 294 | { |
| 295 | struct cryptd_instance_ctx *ctx; | ||
| 296 | struct crypto_instance *inst; | 296 | struct crypto_instance *inst; |
| 297 | struct crypto_alg *alg; | 297 | struct crypto_alg *alg; |
| 298 | int err; | ||
| 298 | 299 | ||
| 299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 300 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
| 300 | CRYPTO_ALG_TYPE_MASK); | 301 | CRYPTO_ALG_TYPE_MASK); |
| 301 | if (IS_ERR(alg)) | 302 | if (IS_ERR(alg)) |
| 302 | return ERR_CAST(alg); | 303 | return PTR_ERR(alg); |
| 303 | 304 | ||
| 304 | inst = cryptd_alloc_instance(alg, queue); | 305 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
| 306 | err = PTR_ERR(inst); | ||
| 305 | if (IS_ERR(inst)) | 307 | if (IS_ERR(inst)) |
| 306 | goto out_put_alg; | 308 | goto out_put_alg; |
| 307 | 309 | ||
| 310 | ctx = crypto_instance_ctx(inst); | ||
| 311 | ctx->queue = queue; | ||
| 312 | |||
| 313 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
| 314 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
| 315 | if (err) | ||
| 316 | goto out_free_inst; | ||
| 317 | |||
| 308 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | 318 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
| 309 | inst->alg.cra_type = &crypto_ablkcipher_type; | 319 | inst->alg.cra_type = &crypto_ablkcipher_type; |
| 310 | 320 | ||
| @@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher( | |||
| 323 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | 333 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
| 324 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | 334 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
| 325 | 335 | ||
| 336 | err = crypto_register_instance(tmpl, inst); | ||
| 337 | if (err) { | ||
| 338 | crypto_drop_spawn(&ctx->spawn); | ||
| 339 | out_free_inst: | ||
| 340 | kfree(inst); | ||
| 341 | } | ||
| 342 | |||
| 326 | out_put_alg: | 343 | out_put_alg: |
| 327 | crypto_mod_put(alg); | 344 | crypto_mod_put(alg); |
| 328 | return inst; | 345 | return err; |
| 329 | } | 346 | } |
| 330 | 347 | ||
| 331 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 348 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
| 332 | { | 349 | { |
| 333 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 350 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
| 334 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 351 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
| 335 | struct crypto_spawn *spawn = &ictx->spawn; | 352 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
| 336 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 353 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
| 337 | struct crypto_hash *cipher; | 354 | struct crypto_shash *hash; |
| 338 | 355 | ||
| 339 | cipher = crypto_spawn_hash(spawn); | 356 | hash = crypto_spawn_shash(spawn); |
| 340 | if (IS_ERR(cipher)) | 357 | if (IS_ERR(hash)) |
| 341 | return PTR_ERR(cipher); | 358 | return PTR_ERR(hash); |
| 342 | 359 | ||
| 343 | ctx->child = cipher; | 360 | ctx->child = hash; |
| 344 | tfm->crt_ahash.reqsize = | 361 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 345 | sizeof(struct cryptd_hash_request_ctx); | 362 | sizeof(struct cryptd_hash_request_ctx) + |
| 363 | crypto_shash_descsize(hash)); | ||
| 346 | return 0; | 364 | return 0; |
| 347 | } | 365 | } |
| 348 | 366 | ||
| @@ -350,22 +368,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | |||
| 350 | { | 368 | { |
| 351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 369 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
| 352 | 370 | ||
| 353 | crypto_free_hash(ctx->child); | 371 | crypto_free_shash(ctx->child); |
| 354 | } | 372 | } |
| 355 | 373 | ||
| 356 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 374 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
| 357 | const u8 *key, unsigned int keylen) | 375 | const u8 *key, unsigned int keylen) |
| 358 | { | 376 | { |
| 359 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 377 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
| 360 | struct crypto_hash *child = ctx->child; | 378 | struct crypto_shash *child = ctx->child; |
| 361 | int err; | 379 | int err; |
| 362 | 380 | ||
| 363 | crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 381 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
| 364 | crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & | 382 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
| 365 | CRYPTO_TFM_REQ_MASK); | 383 | CRYPTO_TFM_REQ_MASK); |
| 366 | err = crypto_hash_setkey(child, key, keylen); | 384 | err = crypto_shash_setkey(child, key, keylen); |
| 367 | crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & | 385 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
| 368 | CRYPTO_TFM_RES_MASK); | 386 | CRYPTO_TFM_RES_MASK); |
| 369 | return err; | 387 | return err; |
| 370 | } | 388 | } |
| 371 | 389 | ||
| @@ -385,21 +403,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req, | |||
| 385 | 403 | ||
| 386 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) | 404 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
| 387 | { | 405 | { |
| 388 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 406 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
| 389 | struct crypto_hash *child = ctx->child; | 407 | struct crypto_shash *child = ctx->child; |
| 390 | struct ahash_request *req = ahash_request_cast(req_async); | 408 | struct ahash_request *req = ahash_request_cast(req_async); |
| 391 | struct cryptd_hash_request_ctx *rctx; | 409 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
| 392 | struct hash_desc desc; | 410 | struct shash_desc *desc = &rctx->desc; |
| 393 | |||
| 394 | rctx = ahash_request_ctx(req); | ||
| 395 | 411 | ||
| 396 | if (unlikely(err == -EINPROGRESS)) | 412 | if (unlikely(err == -EINPROGRESS)) |
| 397 | goto out; | 413 | goto out; |
| 398 | 414 | ||
| 399 | desc.tfm = child; | 415 | desc->tfm = child; |
| 400 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 416 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
| 401 | 417 | ||
| 402 | err = crypto_hash_crt(child)->init(&desc); | 418 | err = crypto_shash_init(desc); |
| 403 | 419 | ||
| 404 | req->base.complete = rctx->complete; | 420 | req->base.complete = rctx->complete; |
| 405 | 421 | ||
| @@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req) | |||
| 416 | 432 | ||
| 417 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | 433 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) |
| 418 | { | 434 | { |
| 419 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 435 | struct ahash_request *req = ahash_request_cast(req_async); |
| 420 | struct crypto_hash *child = ctx->child; | ||
| 421 | struct ahash_request *req = ahash_request_cast(req_async); | ||
| 422 | struct cryptd_hash_request_ctx *rctx; | 436 | struct cryptd_hash_request_ctx *rctx; |
| 423 | struct hash_desc desc; | ||
| 424 | 437 | ||
| 425 | rctx = ahash_request_ctx(req); | 438 | rctx = ahash_request_ctx(req); |
| 426 | 439 | ||
| 427 | if (unlikely(err == -EINPROGRESS)) | 440 | if (unlikely(err == -EINPROGRESS)) |
| 428 | goto out; | 441 | goto out; |
| 429 | 442 | ||
| 430 | desc.tfm = child; | 443 | err = shash_ahash_update(req, &rctx->desc); |
| 431 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 432 | |||
| 433 | err = crypto_hash_crt(child)->update(&desc, | ||
| 434 | req->src, | ||
| 435 | req->nbytes); | ||
| 436 | 444 | ||
| 437 | req->base.complete = rctx->complete; | 445 | req->base.complete = rctx->complete; |
| 438 | 446 | ||
| @@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req) | |||
| 449 | 457 | ||
| 450 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | 458 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) |
| 451 | { | 459 | { |
| 452 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 460 | struct ahash_request *req = ahash_request_cast(req_async); |
| 453 | struct crypto_hash *child = ctx->child; | 461 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
| 454 | struct ahash_request *req = ahash_request_cast(req_async); | ||
| 455 | struct cryptd_hash_request_ctx *rctx; | ||
| 456 | struct hash_desc desc; | ||
| 457 | |||
| 458 | rctx = ahash_request_ctx(req); | ||
| 459 | 462 | ||
| 460 | if (unlikely(err == -EINPROGRESS)) | 463 | if (unlikely(err == -EINPROGRESS)) |
| 461 | goto out; | 464 | goto out; |
| 462 | 465 | ||
| 463 | desc.tfm = child; | 466 | err = crypto_shash_final(&rctx->desc, req->result); |
| 464 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 465 | |||
| 466 | err = crypto_hash_crt(child)->final(&desc, req->result); | ||
| 467 | 467 | ||
| 468 | req->base.complete = rctx->complete; | 468 | req->base.complete = rctx->complete; |
| 469 | 469 | ||
| @@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) | |||
| 478 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 478 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | 481 | static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) |
| 482 | { | 482 | { |
| 483 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 483 | struct ahash_request *req = ahash_request_cast(req_async); |
| 484 | struct crypto_hash *child = ctx->child; | 484 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
| 485 | struct ahash_request *req = ahash_request_cast(req_async); | ||
| 486 | struct cryptd_hash_request_ctx *rctx; | ||
| 487 | struct hash_desc desc; | ||
| 488 | 485 | ||
| 489 | rctx = ahash_request_ctx(req); | 486 | if (unlikely(err == -EINPROGRESS)) |
| 487 | goto out; | ||
| 488 | |||
| 489 | err = shash_ahash_finup(req, &rctx->desc); | ||
| 490 | |||
| 491 | req->base.complete = rctx->complete; | ||
| 492 | |||
| 493 | out: | ||
| 494 | local_bh_disable(); | ||
| 495 | rctx->complete(&req->base, err); | ||
| 496 | local_bh_enable(); | ||
| 497 | } | ||
| 498 | |||
| 499 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) | ||
| 500 | { | ||
| 501 | return cryptd_hash_enqueue(req, cryptd_hash_finup); | ||
| 502 | } | ||
| 503 | |||
| 504 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | ||
| 505 | { | ||
| 506 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | ||
| 507 | struct crypto_shash *child = ctx->child; | ||
| 508 | struct ahash_request *req = ahash_request_cast(req_async); | ||
| 509 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
| 510 | struct shash_desc *desc = &rctx->desc; | ||
| 490 | 511 | ||
| 491 | if (unlikely(err == -EINPROGRESS)) | 512 | if (unlikely(err == -EINPROGRESS)) |
| 492 | goto out; | 513 | goto out; |
| 493 | 514 | ||
| 494 | desc.tfm = child; | 515 | desc->tfm = child; |
| 495 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 516 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
| 496 | 517 | ||
| 497 | err = crypto_hash_crt(child)->digest(&desc, | 518 | err = shash_ahash_digest(req, desc); |
| 498 | req->src, | ||
| 499 | req->nbytes, | ||
| 500 | req->result); | ||
| 501 | 519 | ||
| 502 | req->base.complete = rctx->complete; | 520 | req->base.complete = rctx->complete; |
| 503 | 521 | ||
| @@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) | |||
| 512 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 530 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
| 513 | } | 531 | } |
| 514 | 532 | ||
| 515 | static struct crypto_instance *cryptd_alloc_hash( | 533 | static int cryptd_hash_export(struct ahash_request *req, void *out) |
| 516 | struct rtattr **tb, struct cryptd_queue *queue) | ||
| 517 | { | 534 | { |
| 518 | struct crypto_instance *inst; | 535 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
| 536 | |||
| 537 | return crypto_shash_export(&rctx->desc, out); | ||
| 538 | } | ||
| 539 | |||
| 540 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | ||
| 541 | { | ||
| 542 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
| 543 | |||
| 544 | return crypto_shash_import(&rctx->desc, in); | ||
| 545 | } | ||
| 546 | |||
| 547 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | ||
| 548 | struct cryptd_queue *queue) | ||
| 549 | { | ||
| 550 | struct hashd_instance_ctx *ctx; | ||
| 551 | struct ahash_instance *inst; | ||
| 552 | struct shash_alg *salg; | ||
| 519 | struct crypto_alg *alg; | 553 | struct crypto_alg *alg; |
| 554 | int err; | ||
| 520 | 555 | ||
| 521 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 556 | salg = shash_attr_alg(tb[1], 0, 0); |
| 522 | CRYPTO_ALG_TYPE_HASH_MASK); | 557 | if (IS_ERR(salg)) |
| 523 | if (IS_ERR(alg)) | 558 | return PTR_ERR(salg); |
| 524 | return ERR_PTR(PTR_ERR(alg)); | ||
| 525 | 559 | ||
| 526 | inst = cryptd_alloc_instance(alg, queue); | 560 | alg = &salg->base; |
| 561 | inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), | ||
| 562 | sizeof(*ctx)); | ||
| 563 | err = PTR_ERR(inst); | ||
| 527 | if (IS_ERR(inst)) | 564 | if (IS_ERR(inst)) |
| 528 | goto out_put_alg; | 565 | goto out_put_alg; |
| 529 | 566 | ||
| 530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; | 567 | ctx = ahash_instance_ctx(inst); |
| 531 | inst->alg.cra_type = &crypto_ahash_type; | 568 | ctx->queue = queue; |
| 532 | 569 | ||
| 533 | inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; | 570 | err = crypto_init_shash_spawn(&ctx->spawn, salg, |
| 534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | 571 | ahash_crypto_instance(inst)); |
| 572 | if (err) | ||
| 573 | goto out_free_inst; | ||
| 535 | 574 | ||
| 536 | inst->alg.cra_init = cryptd_hash_init_tfm; | 575 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; |
| 537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; | ||
| 538 | 576 | ||
| 539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; | 577 | inst->alg.halg.digestsize = salg->digestsize; |
| 540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; | 578 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
| 541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; | 579 | |
| 542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; | 580 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; |
| 543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; | 581 | inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; |
| 582 | |||
| 583 | inst->alg.init = cryptd_hash_init_enqueue; | ||
| 584 | inst->alg.update = cryptd_hash_update_enqueue; | ||
| 585 | inst->alg.final = cryptd_hash_final_enqueue; | ||
| 586 | inst->alg.finup = cryptd_hash_finup_enqueue; | ||
| 587 | inst->alg.export = cryptd_hash_export; | ||
| 588 | inst->alg.import = cryptd_hash_import; | ||
| 589 | inst->alg.setkey = cryptd_hash_setkey; | ||
| 590 | inst->alg.digest = cryptd_hash_digest_enqueue; | ||
| 591 | |||
| 592 | err = ahash_register_instance(tmpl, inst); | ||
| 593 | if (err) { | ||
| 594 | crypto_drop_shash(&ctx->spawn); | ||
| 595 | out_free_inst: | ||
| 596 | kfree(inst); | ||
| 597 | } | ||
| 544 | 598 | ||
| 545 | out_put_alg: | 599 | out_put_alg: |
| 546 | crypto_mod_put(alg); | 600 | crypto_mod_put(alg); |
| 547 | return inst; | 601 | return err; |
| 548 | } | 602 | } |
| 549 | 603 | ||
| 550 | static struct cryptd_queue queue; | 604 | static struct cryptd_queue queue; |
| 551 | 605 | ||
| 552 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) | 606 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 553 | { | 607 | { |
| 554 | struct crypto_attr_type *algt; | 608 | struct crypto_attr_type *algt; |
| 555 | 609 | ||
| 556 | algt = crypto_get_attr_type(tb); | 610 | algt = crypto_get_attr_type(tb); |
| 557 | if (IS_ERR(algt)) | 611 | if (IS_ERR(algt)) |
| 558 | return ERR_CAST(algt); | 612 | return PTR_ERR(algt); |
| 559 | 613 | ||
| 560 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 614 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
| 561 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 615 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
| 562 | return cryptd_alloc_blkcipher(tb, &queue); | 616 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
| 563 | case CRYPTO_ALG_TYPE_DIGEST: | 617 | case CRYPTO_ALG_TYPE_DIGEST: |
| 564 | return cryptd_alloc_hash(tb, &queue); | 618 | return cryptd_create_hash(tmpl, tb, &queue); |
| 565 | } | 619 | } |
| 566 | 620 | ||
| 567 | return ERR_PTR(-EINVAL); | 621 | return -EINVAL; |
| 568 | } | 622 | } |
| 569 | 623 | ||
| 570 | static void cryptd_free(struct crypto_instance *inst) | 624 | static void cryptd_free(struct crypto_instance *inst) |
| 571 | { | 625 | { |
| 572 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 626 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
| 627 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | ||
| 628 | |||
| 629 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | ||
| 630 | case CRYPTO_ALG_TYPE_AHASH: | ||
| 631 | crypto_drop_shash(&hctx->spawn); | ||
| 632 | kfree(ahash_instance(inst)); | ||
| 633 | return; | ||
| 634 | } | ||
| 573 | 635 | ||
| 574 | crypto_drop_spawn(&ctx->spawn); | 636 | crypto_drop_spawn(&ctx->spawn); |
| 575 | kfree(inst); | 637 | kfree(inst); |
| @@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst) | |||
| 577 | 639 | ||
| 578 | static struct crypto_template cryptd_tmpl = { | 640 | static struct crypto_template cryptd_tmpl = { |
| 579 | .name = "cryptd", | 641 | .name = "cryptd", |
| 580 | .alloc = cryptd_alloc, | 642 | .create = cryptd_create, |
| 581 | .free = cryptd_free, | 643 | .free = cryptd_free, |
| 582 | .module = THIS_MODULE, | 644 | .module = THIS_MODULE, |
| 583 | }; | 645 | }; |
| @@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | |||
| 620 | } | 682 | } |
| 621 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 683 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
| 622 | 684 | ||
| 685 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | ||
| 686 | u32 type, u32 mask) | ||
| 687 | { | ||
| 688 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
| 689 | struct crypto_ahash *tfm; | ||
| 690 | |||
| 691 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
| 692 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
| 693 | return ERR_PTR(-EINVAL); | ||
| 694 | tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); | ||
| 695 | if (IS_ERR(tfm)) | ||
| 696 | return ERR_CAST(tfm); | ||
| 697 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
| 698 | crypto_free_ahash(tfm); | ||
| 699 | return ERR_PTR(-EINVAL); | ||
| 700 | } | ||
| 701 | |||
| 702 | return __cryptd_ahash_cast(tfm); | ||
| 703 | } | ||
| 704 | EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); | ||
| 705 | |||
| 706 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) | ||
| 707 | { | ||
| 708 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | ||
| 709 | |||
| 710 | return ctx->child; | ||
| 711 | } | ||
| 712 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); | ||
| 713 | |||
| 714 | void cryptd_free_ahash(struct cryptd_ahash *tfm) | ||
| 715 | { | ||
| 716 | crypto_free_ahash(&tfm->base); | ||
| 717 | } | ||
| 718 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); | ||
| 719 | |||
| 623 | static int __init cryptd_init(void) | 720 | static int __init cryptd_init(void) |
| 624 | { | 721 | { |
| 625 | int err; | 722 | int err; |
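The three exports added to cryptd.c above (cryptd_alloc_ahash, cryptd_ahash_child, cryptd_free_ahash) let a caller wrap a synchronous shash in a cryptd-backed ahash and still reach the underlying shash for direct use. A rough usage sketch, with "ghash" chosen purely as an example child algorithm and error handling trimmed; cryptd_ahash_demo is a hypothetical function, not code from this patch.

#include <linux/kernel.h>
#include <linux/err.h>
#include <crypto/hash.h>
#include <crypto/cryptd.h>

static int cryptd_ahash_demo(void)
{
	struct cryptd_ahash *cryptd_tfm;
	struct crypto_shash *child;

	cryptd_tfm = cryptd_alloc_ahash("ghash", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* Asynchronous requests go to &cryptd_tfm->base through the
	 * normal crypto_ahash_*() calls and run from the cryptd
	 * workqueue. */

	/* Callers in process context may use the child shash directly. */
	child = cryptd_ahash_child(cryptd_tfm);
	pr_info("cryptd child descsize: %u\n", crypto_shash_descsize(child));

	cryptd_free_ahash(cryptd_tfm);
	return 0;
}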
diff --git a/crypto/ctr.c b/crypto/ctr.c index 2d7425f0e7b8..6c3bfabb9d1d 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c | |||
| @@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) | |||
| 219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; | 219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; |
| 220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; | 220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; |
| 221 | 221 | ||
| 222 | inst->alg.cra_blkcipher.geniv = "chainiv"; | ||
| 223 | |||
| 222 | out: | 224 | out: |
| 223 | crypto_mod_put(alg); | 225 | crypto_mod_put(alg); |
| 224 | return inst; | 226 | return inst; |
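For context on how the gcm.c rework that follows drives this CTR instance: with a 96-bit IV the initial counter block is the IV followed by a big-endian 32-bit 1 (the cpu_to_be32(1) in crypto_gcm_init_crypt() below), and CTR itself does the per-block increments. A small sketch of that layout, offered only as an illustration; gcm_build_j0 is a hypothetical helper, not code from this patch.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void gcm_build_j0(u8 j0[16], const u8 iv[12])
{
	__be32 one = cpu_to_be32(1);

	memcpy(j0, iv, 12);			/* 96-bit nonce */
	memcpy(j0 + 12, &one, sizeof(one));	/* counter field, starts at 1 */
}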
diff --git a/crypto/gcm.c b/crypto/gcm.c index e70afd0c73dd..5fc3292483ef 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
| @@ -11,7 +11,10 @@ | |||
| 11 | #include <crypto/gf128mul.h> | 11 | #include <crypto/gf128mul.h> |
| 12 | #include <crypto/internal/aead.h> | 12 | #include <crypto/internal/aead.h> |
| 13 | #include <crypto/internal/skcipher.h> | 13 | #include <crypto/internal/skcipher.h> |
| 14 | #include <crypto/internal/hash.h> | ||
| 14 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
| 16 | #include <crypto/hash.h> | ||
| 17 | #include "internal.h" | ||
| 15 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
| 16 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| @@ -21,11 +24,12 @@ | |||
| 21 | 24 | ||
| 22 | struct gcm_instance_ctx { | 25 | struct gcm_instance_ctx { |
| 23 | struct crypto_skcipher_spawn ctr; | 26 | struct crypto_skcipher_spawn ctr; |
| 27 | struct crypto_ahash_spawn ghash; | ||
| 24 | }; | 28 | }; |
| 25 | 29 | ||
| 26 | struct crypto_gcm_ctx { | 30 | struct crypto_gcm_ctx { |
| 27 | struct crypto_ablkcipher *ctr; | 31 | struct crypto_ablkcipher *ctr; |
| 28 | struct gf128mul_4k *gf128; | 32 | struct crypto_ahash *ghash; |
| 29 | }; | 33 | }; |
| 30 | 34 | ||
| 31 | struct crypto_rfc4106_ctx { | 35 | struct crypto_rfc4106_ctx { |
| @@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx { | |||
| 34 | }; | 38 | }; |
| 35 | 39 | ||
| 36 | struct crypto_gcm_ghash_ctx { | 40 | struct crypto_gcm_ghash_ctx { |
| 37 | u32 bytes; | 41 | unsigned int cryptlen; |
| 38 | u32 flags; | 42 | struct scatterlist *src; |
| 39 | struct gf128mul_4k *gf128; | 43 | crypto_completion_t complete; |
| 40 | u8 buffer[16]; | ||
| 41 | }; | 44 | }; |
| 42 | 45 | ||
| 43 | struct crypto_gcm_req_priv_ctx { | 46 | struct crypto_gcm_req_priv_ctx { |
| @@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx { | |||
| 45 | u8 iauth_tag[16]; | 48 | u8 iauth_tag[16]; |
| 46 | struct scatterlist src[2]; | 49 | struct scatterlist src[2]; |
| 47 | struct scatterlist dst[2]; | 50 | struct scatterlist dst[2]; |
| 48 | struct crypto_gcm_ghash_ctx ghash; | 51 | struct crypto_gcm_ghash_ctx ghash_ctx; |
| 49 | struct ablkcipher_request abreq; | 52 | union { |
| 53 | struct ahash_request ahreq; | ||
| 54 | struct ablkcipher_request abreq; | ||
| 55 | } u; | ||
| 50 | }; | 56 | }; |
| 51 | 57 | ||
| 52 | struct crypto_gcm_setkey_result { | 58 | struct crypto_gcm_setkey_result { |
| @@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result { | |||
| 54 | struct completion completion; | 60 | struct completion completion; |
| 55 | }; | 61 | }; |
| 56 | 62 | ||
| 63 | static void *gcm_zeroes; | ||
| 64 | |||
| 57 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | 65 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( |
| 58 | struct aead_request *req) | 66 | struct aead_request *req) |
| 59 | { | 67 | { |
| @@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | |||
| 62 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | 70 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); |
| 63 | } | 71 | } |
| 64 | 72 | ||
| 65 | static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, | ||
| 66 | struct gf128mul_4k *gf128) | ||
| 67 | { | ||
| 68 | ctx->bytes = 0; | ||
| 69 | ctx->flags = flags; | ||
| 70 | ctx->gf128 = gf128; | ||
| 71 | memset(ctx->buffer, 0, 16); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx, | ||
| 75 | const u8 *src, unsigned int srclen) | ||
| 76 | { | ||
| 77 | u8 *dst = ctx->buffer; | ||
| 78 | |||
| 79 | if (ctx->bytes) { | ||
| 80 | int n = min(srclen, ctx->bytes); | ||
| 81 | u8 *pos = dst + (16 - ctx->bytes); | ||
| 82 | |||
| 83 | ctx->bytes -= n; | ||
| 84 | srclen -= n; | ||
| 85 | |||
| 86 | while (n--) | ||
| 87 | *pos++ ^= *src++; | ||
| 88 | |||
| 89 | if (!ctx->bytes) | ||
| 90 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 91 | } | ||
| 92 | |||
| 93 | while (srclen >= 16) { | ||
| 94 | crypto_xor(dst, src, 16); | ||
| 95 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 96 | src += 16; | ||
| 97 | srclen -= 16; | ||
| 98 | } | ||
| 99 | |||
| 100 | if (srclen) { | ||
| 101 | ctx->bytes = 16 - srclen; | ||
| 102 | while (srclen--) | ||
| 103 | *dst++ ^= *src++; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, | ||
| 108 | struct scatterlist *sg, int len) | ||
| 109 | { | ||
| 110 | struct scatter_walk walk; | ||
| 111 | u8 *src; | ||
| 112 | int n; | ||
| 113 | |||
| 114 | if (!len) | ||
| 115 | return; | ||
| 116 | |||
| 117 | scatterwalk_start(&walk, sg); | ||
| 118 | |||
| 119 | while (len) { | ||
| 120 | n = scatterwalk_clamp(&walk, len); | ||
| 121 | |||
| 122 | if (!n) { | ||
| 123 | scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); | ||
| 124 | n = scatterwalk_clamp(&walk, len); | ||
| 125 | } | ||
| 126 | |||
| 127 | src = scatterwalk_map(&walk, 0); | ||
| 128 | |||
| 129 | crypto_gcm_ghash_update(ctx, src, n); | ||
| 130 | len -= n; | ||
| 131 | |||
| 132 | scatterwalk_unmap(src, 0); | ||
| 133 | scatterwalk_advance(&walk, n); | ||
| 134 | scatterwalk_done(&walk, 0, len); | ||
| 135 | if (len) | ||
| 136 | crypto_yield(ctx->flags); | ||
| 137 | } | ||
| 138 | } | ||
| 139 | |||
| 140 | static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx) | ||
| 141 | { | ||
| 142 | u8 *dst = ctx->buffer; | ||
| 143 | |||
| 144 | if (ctx->bytes) { | ||
| 145 | u8 *tmp = dst + (16 - ctx->bytes); | ||
| 146 | |||
| 147 | while (ctx->bytes--) | ||
| 148 | *tmp++ ^= 0; | ||
| 149 | |||
| 150 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 151 | } | ||
| 152 | |||
| 153 | ctx->bytes = 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, | ||
| 157 | unsigned int authlen, | ||
| 158 | unsigned int cryptlen, u8 *dst) | ||
| 159 | { | ||
| 160 | u8 *buf = ctx->buffer; | ||
| 161 | u128 lengths; | ||
| 162 | |||
| 163 | lengths.a = cpu_to_be64(authlen * 8); | ||
| 164 | lengths.b = cpu_to_be64(cryptlen * 8); | ||
| 165 | |||
| 166 | crypto_gcm_ghash_flush(ctx); | ||
| 167 | crypto_xor(buf, (u8 *)&lengths, 16); | ||
| 168 | gf128mul_4k_lle((be128 *)buf, ctx->gf128); | ||
| 169 | crypto_xor(dst, buf, 16); | ||
| 170 | } | ||
| 171 | |||
| 172 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) | 73 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) |
| 173 | { | 74 | { |
| 174 | struct crypto_gcm_setkey_result *result = req->data; | 75 | struct crypto_gcm_setkey_result *result = req->data; |
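The open-coded gf128mul helpers removed above are replaced by calls into a separate "ghash" hash, reached through the ahash spawn added to gcm_instance_ctx. As a hedged sketch of what the asynchronous gcm_hash_*() chain below ultimately computes, here is the equivalent done through the synchronous shash interface; ghash_digest_demo is a hypothetical helper, the hash key is H = E_K(0^128) (the data->hash value produced in crypto_gcm_setkey() below), and the input is assumed to be already padded to 16-byte blocks, which gcm_remain() and gcm_zeroes handle in the real code.

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

static int ghash_digest_demo(const u8 h[16], const u8 *data,
			     unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, h, 16);
	if (err)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, data, len);
	if (!err)
		err = crypto_shash_final(desc, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}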
| @@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
| 184 | unsigned int keylen) | 85 | unsigned int keylen) |
| 185 | { | 86 | { |
| 186 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 87 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
| 88 | struct crypto_ahash *ghash = ctx->ghash; | ||
| 187 | struct crypto_ablkcipher *ctr = ctx->ctr; | 89 | struct crypto_ablkcipher *ctr = ctx->ctr; |
| 188 | struct { | 90 | struct { |
| 189 | be128 hash; | 91 | be128 hash; |
| @@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
| 233 | if (err) | 135 | if (err) |
| 234 | goto out; | 136 | goto out; |
| 235 | 137 | ||
| 236 | if (ctx->gf128 != NULL) | 138 | crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); |
| 237 | gf128mul_free_4k(ctx->gf128); | 139 | crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & |
| 238 | 140 | CRYPTO_TFM_REQ_MASK); | |
| 239 | ctx->gf128 = gf128mul_init_4k_lle(&data->hash); | 141 | err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); |
| 240 | 142 | crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) & | |
| 241 | if (ctx->gf128 == NULL) | 143 | CRYPTO_TFM_RES_MASK); |
| 242 | err = -ENOMEM; | ||
| 243 | 144 | ||
| 244 | out: | 145 | out: |
| 245 | kfree(data); | 146 | kfree(data); |
| @@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
| 272 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 173 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 273 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 174 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
| 274 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 175 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
| 275 | u32 flags = req->base.tfm->crt_flags; | ||
| 276 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
| 277 | struct scatterlist *dst; | 176 | struct scatterlist *dst; |
| 278 | __be32 counter = cpu_to_be32(1); | 177 | __be32 counter = cpu_to_be32(1); |
| 279 | 178 | ||
| @@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
| 296 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, | 195 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, |
| 297 | cryptlen + sizeof(pctx->auth_tag), | 196 | cryptlen + sizeof(pctx->auth_tag), |
| 298 | req->iv); | 197 | req->iv); |
| 198 | } | ||
| 199 | |||
| 200 | static inline unsigned int gcm_remain(unsigned int len) | ||
| 201 | { | ||
| 202 | len &= 0xfU; | ||
| 203 | return len ? 16 - len : 0; | ||
| 204 | } | ||
| 205 | |||
| 206 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); | ||
| 207 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | ||
| 299 | 208 | ||
| 300 | crypto_gcm_ghash_init(ghash, flags, ctx->gf128); | 209 | static int gcm_hash_update(struct aead_request *req, |
| 210 | struct crypto_gcm_req_priv_ctx *pctx, | ||
| 211 | crypto_completion_t complete, | ||
| 212 | struct scatterlist *src, | ||
| 213 | unsigned int len) | ||
| 214 | { | ||
| 215 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
| 301 | 216 | ||
| 302 | crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); | 217 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
| 303 | crypto_gcm_ghash_flush(ghash); | 218 | complete, req); |
| 219 | ahash_request_set_crypt(ahreq, src, NULL, len); | ||
| 220 | |||
| 221 | return crypto_ahash_update(ahreq); | ||
| 304 | } | 222 | } |
| 305 | 223 | ||
| 306 | static int crypto_gcm_hash(struct aead_request *req) | 224 | static int gcm_hash_remain(struct aead_request *req, |
| 225 | struct crypto_gcm_req_priv_ctx *pctx, | ||
| 226 | unsigned int remain, | ||
| 227 | crypto_completion_t complete) | ||
| 307 | { | 228 | { |
| 308 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 229 | struct ahash_request *ahreq = &pctx->u.ahreq; |
| 230 | |||
| 231 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
| 232 | complete, req); | ||
| 233 | sg_init_one(pctx->src, gcm_zeroes, remain); | ||
| 234 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); | ||
| 235 | |||
| 236 | return crypto_ahash_update(ahreq); | ||
| 237 | } | ||
| 238 | |||
| 239 | static int gcm_hash_len(struct aead_request *req, | ||
| 240 | struct crypto_gcm_req_priv_ctx *pctx) | ||
| 241 | { | ||
| 242 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
| 243 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 244 | u128 lengths; | ||
| 245 | |||
| 246 | lengths.a = cpu_to_be64(req->assoclen * 8); | ||
| 247 | lengths.b = cpu_to_be64(gctx->cryptlen * 8); | ||
| 248 | memcpy(pctx->iauth_tag, &lengths, 16); | ||
| 249 | sg_init_one(pctx->src, pctx->iauth_tag, 16); | ||
| 250 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
| 251 | gcm_hash_len_done, req); | ||
| 252 | ahash_request_set_crypt(ahreq, pctx->src, | ||
| 253 | NULL, sizeof(lengths)); | ||
| 254 | |||
| 255 | return crypto_ahash_update(ahreq); | ||
| 256 | } | ||
| 257 | |||
| 258 | static int gcm_hash_final(struct aead_request *req, | ||
| 259 | struct crypto_gcm_req_priv_ctx *pctx) | ||
| 260 | { | ||
| 261 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
| 262 | |||
| 263 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
| 264 | gcm_hash_final_done, req); | ||
| 265 | ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0); | ||
| 266 | |||
| 267 | return crypto_ahash_final(ahreq); | ||
| 268 | } | ||
| 269 | |||
| 270 | static void gcm_hash_final_done(struct crypto_async_request *areq, | ||
| 271 | int err) | ||
| 272 | { | ||
| 273 | struct aead_request *req = areq->data; | ||
| 309 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 274 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
| 310 | u8 *auth_tag = pctx->auth_tag; | 275 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
| 311 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 276 | |
| 277 | if (!err) | ||
| 278 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
| 312 | 279 | ||
| 313 | crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); | 280 | gctx->complete(areq, err); |
| 314 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, | 281 | } |
| 315 | auth_tag); | 282 | |
| 283 | static void gcm_hash_len_done(struct crypto_async_request *areq, | ||
| 284 | int err) | ||
| 285 | { | ||
| 286 | struct aead_request *req = areq->data; | ||
| 287 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 288 | |||
| 289 | if (!err) { | ||
| 290 | err = gcm_hash_final(req, pctx); | ||
| 291 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 292 | return; | ||
| 293 | } | ||
| 294 | |||
| 295 | gcm_hash_final_done(areq, err); | ||
| 296 | } | ||
| 297 | |||
| 298 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | ||
| 299 | int err) | ||
| 300 | { | ||
| 301 | struct aead_request *req = areq->data; | ||
| 302 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 303 | |||
| 304 | if (!err) { | ||
| 305 | err = gcm_hash_len(req, pctx); | ||
| 306 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 307 | return; | ||
| 308 | } | ||
| 309 | |||
| 310 | gcm_hash_len_done(areq, err); | ||
| 311 | } | ||
| 312 | |||
| 313 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, | ||
| 314 | int err) | ||
| 315 | { | ||
| 316 | struct aead_request *req = areq->data; | ||
| 317 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 318 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 319 | unsigned int remain; | ||
| 320 | |||
| 321 | if (!err) { | ||
| 322 | remain = gcm_remain(gctx->cryptlen); | ||
| 323 | BUG_ON(!remain); | ||
| 324 | err = gcm_hash_remain(req, pctx, remain, | ||
| 325 | gcm_hash_crypt_remain_done); | ||
| 326 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 327 | return; | ||
| 328 | } | ||
| 329 | |||
| 330 | gcm_hash_crypt_remain_done(areq, err); | ||
| 331 | } | ||
| 332 | |||
| 333 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | ||
| 334 | int err) | ||
| 335 | { | ||
| 336 | struct aead_request *req = areq->data; | ||
| 337 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 338 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 339 | crypto_completion_t complete; | ||
| 340 | unsigned int remain = 0; | ||
| 341 | |||
| 342 | if (!err && gctx->cryptlen) { | ||
| 343 | remain = gcm_remain(gctx->cryptlen); | ||
| 344 | complete = remain ? gcm_hash_crypt_done : | ||
| 345 | gcm_hash_crypt_remain_done; | ||
| 346 | err = gcm_hash_update(req, pctx, complete, | ||
| 347 | gctx->src, gctx->cryptlen); | ||
| 348 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 349 | return; | ||
| 350 | } | ||
| 351 | |||
| 352 | if (remain) | ||
| 353 | gcm_hash_crypt_done(areq, err); | ||
| 354 | else | ||
| 355 | gcm_hash_crypt_remain_done(areq, err); | ||
| 356 | } | ||
| 357 | |||
| 358 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, | ||
| 359 | int err) | ||
| 360 | { | ||
| 361 | struct aead_request *req = areq->data; | ||
| 362 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 363 | unsigned int remain; | ||
| 364 | |||
| 365 | if (!err) { | ||
| 366 | remain = gcm_remain(req->assoclen); | ||
| 367 | BUG_ON(!remain); | ||
| 368 | err = gcm_hash_remain(req, pctx, remain, | ||
| 369 | gcm_hash_assoc_remain_done); | ||
| 370 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 371 | return; | ||
| 372 | } | ||
| 373 | |||
| 374 | gcm_hash_assoc_remain_done(areq, err); | ||
| 375 | } | ||
| 376 | |||
| 377 | static void gcm_hash_init_done(struct crypto_async_request *areq, | ||
| 378 | int err) | ||
| 379 | { | ||
| 380 | struct aead_request *req = areq->data; | ||
| 381 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 382 | crypto_completion_t complete; | ||
| 383 | unsigned int remain = 0; | ||
| 384 | |||
| 385 | if (!err && req->assoclen) { | ||
| 386 | remain = gcm_remain(req->assoclen); | ||
| 387 | complete = remain ? gcm_hash_assoc_done : | ||
| 388 | gcm_hash_assoc_remain_done; | ||
| 389 | err = gcm_hash_update(req, pctx, complete, | ||
| 390 | req->assoc, req->assoclen); | ||
| 391 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 392 | return; | ||
| 393 | } | ||
| 394 | |||
| 395 | if (remain) | ||
| 396 | gcm_hash_assoc_done(areq, err); | ||
| 397 | else | ||
| 398 | gcm_hash_assoc_remain_done(areq, err); | ||
| 399 | } | ||
| 400 | |||
| 401 | static int gcm_hash(struct aead_request *req, | ||
| 402 | struct crypto_gcm_req_priv_ctx *pctx) | ||
| 403 | { | ||
| 404 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
| 405 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 406 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
| 407 | unsigned int remain; | ||
| 408 | crypto_completion_t complete; | ||
| 409 | int err; | ||
| 410 | |||
| 411 | ahash_request_set_tfm(ahreq, ctx->ghash); | ||
| 412 | |||
| 413 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
| 414 | gcm_hash_init_done, req); | ||
| 415 | err = crypto_ahash_init(ahreq); | ||
| 416 | if (err) | ||
| 417 | return err; | ||
| 418 | remain = gcm_remain(req->assoclen); | ||
| 419 | complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; | ||
| 420 | err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen); | ||
| 421 | if (err) | ||
| 422 | return err; | ||
| 423 | if (remain) { | ||
| 424 | err = gcm_hash_remain(req, pctx, remain, | ||
| 425 | gcm_hash_assoc_remain_done); | ||
| 426 | if (err) | ||
| 427 | return err; | ||
| 428 | } | ||
| 429 | remain = gcm_remain(gctx->cryptlen); | ||
| 430 | complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; | ||
| 431 | err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen); | ||
| 432 | if (err) | ||
| 433 | return err; | ||
| 434 | if (remain) { | ||
| 435 | err = gcm_hash_remain(req, pctx, remain, | ||
| 436 | gcm_hash_crypt_remain_done); | ||
| 437 | if (err) | ||
| 438 | return err; | ||
| 439 | } | ||
| 440 | err = gcm_hash_len(req, pctx); | ||
| 441 | if (err) | ||
| 442 | return err; | ||
| 443 | err = gcm_hash_final(req, pctx); | ||
| 444 | if (err) | ||
| 445 | return err; | ||
| 446 | |||
| 447 | return 0; | ||
| 448 | } | ||
| 449 | |||
| 450 | static void gcm_enc_copy_hash(struct aead_request *req, | ||
| 451 | struct crypto_gcm_req_priv_ctx *pctx) | ||
| 452 | { | ||
| 453 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 454 | u8 *auth_tag = pctx->auth_tag; | ||
| 316 | 455 | ||
| 317 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, | 456 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, |
| 318 | crypto_aead_authsize(aead), 1); | 457 | crypto_aead_authsize(aead), 1); |
| 319 | return 0; | ||
| 320 | } | 458 | } |
| 321 | 459 | ||
| 322 | static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) | 460 | static void gcm_enc_hash_done(struct crypto_async_request *areq, |
| 461 | int err) | ||
| 323 | { | 462 | { |
| 324 | struct aead_request *req = areq->data; | 463 | struct aead_request *req = areq->data; |
| 464 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 325 | 465 | ||
| 326 | if (!err) | 466 | if (!err) |
| 327 | err = crypto_gcm_hash(req); | 467 | gcm_enc_copy_hash(req, pctx); |
| 328 | 468 | ||
| 329 | aead_request_complete(req, err); | 469 | aead_request_complete(req, err); |
| 330 | } | 470 | } |
| 331 | 471 | ||
| 472 | static void gcm_encrypt_done(struct crypto_async_request *areq, | ||
| 473 | int err) | ||
| 474 | { | ||
| 475 | struct aead_request *req = areq->data; | ||
| 476 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 477 | |||
| 478 | if (!err) { | ||
| 479 | err = gcm_hash(req, pctx); | ||
| 480 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 481 | return; | ||
| 482 | } | ||
| 483 | |||
| 484 | gcm_enc_hash_done(areq, err); | ||
| 485 | } | ||
| 486 | |||
| 332 | static int crypto_gcm_encrypt(struct aead_request *req) | 487 | static int crypto_gcm_encrypt(struct aead_request *req) |
| 333 | { | 488 | { |
| 334 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 489 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
| 335 | struct ablkcipher_request *abreq = &pctx->abreq; | 490 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
| 491 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 336 | int err; | 492 | int err; |
| 337 | 493 | ||
| 338 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); | 494 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); |
| 339 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 495 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
| 340 | crypto_gcm_encrypt_done, req); | 496 | gcm_encrypt_done, req); |
| 497 | |||
| 498 | gctx->src = req->dst; | ||
| 499 | gctx->cryptlen = req->cryptlen; | ||
| 500 | gctx->complete = gcm_enc_hash_done; | ||
| 341 | 501 | ||
| 342 | err = crypto_ablkcipher_encrypt(abreq); | 502 | err = crypto_ablkcipher_encrypt(abreq); |
| 343 | if (err) | 503 | if (err) |
| 344 | return err; | 504 | return err; |
| 345 | 505 | ||
| 346 | return crypto_gcm_hash(req); | 506 | err = gcm_hash(req, pctx); |
| 507 | if (err) | ||
| 508 | return err; | ||
| 509 | |||
| 510 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
| 511 | gcm_enc_copy_hash(req, pctx); | ||
| 512 | |||
| 513 | return 0; | ||
| 347 | } | 514 | } |
| 348 | 515 | ||
| 349 | static int crypto_gcm_verify(struct aead_request *req) | 516 | static int crypto_gcm_verify(struct aead_request *req, |
| 517 | struct crypto_gcm_req_priv_ctx *pctx) | ||
| 350 | { | 518 | { |
| 351 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 519 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 352 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 353 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
| 354 | u8 *auth_tag = pctx->auth_tag; | 520 | u8 *auth_tag = pctx->auth_tag; |
| 355 | u8 *iauth_tag = pctx->iauth_tag; | 521 | u8 *iauth_tag = pctx->iauth_tag; |
| 356 | unsigned int authsize = crypto_aead_authsize(aead); | 522 | unsigned int authsize = crypto_aead_authsize(aead); |
| 357 | unsigned int cryptlen = req->cryptlen - authsize; | 523 | unsigned int cryptlen = req->cryptlen - authsize; |
| 358 | 524 | ||
| 359 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); | 525 | crypto_xor(auth_tag, iauth_tag, 16); |
| 360 | |||
| 361 | authsize = crypto_aead_authsize(aead); | ||
| 362 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); | 526 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); |
| 363 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; | 527 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; |
| 364 | } | 528 | } |
| 365 | 529 | ||
| 366 | static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) | 530 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) |
| 367 | { | 531 | { |
| 368 | struct aead_request *req = areq->data; | 532 | struct aead_request *req = areq->data; |
| 533 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 369 | 534 | ||
| 370 | if (!err) | 535 | if (!err) |
| 371 | err = crypto_gcm_verify(req); | 536 | err = crypto_gcm_verify(req, pctx); |
| 372 | 537 | ||
| 373 | aead_request_complete(req, err); | 538 | aead_request_complete(req, err); |
| 374 | } | 539 | } |
| 375 | 540 | ||
| 541 | static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) | ||
| 542 | { | ||
| 543 | struct aead_request *req = areq->data; | ||
| 544 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
| 545 | struct ablkcipher_request *abreq = &pctx->u.abreq; | ||
| 546 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
| 547 | |||
| 548 | if (!err) { | ||
| 549 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 550 | gcm_decrypt_done, req); | ||
| 551 | crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); | ||
| 552 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 553 | if (err == -EINPROGRESS || err == -EBUSY) | ||
| 554 | return; | ||
| 555 | } | ||
| 556 | |||
| 557 | gcm_decrypt_done(areq, err); | ||
| 558 | } | ||
| 559 | |||
| 376 | static int crypto_gcm_decrypt(struct aead_request *req) | 560 | static int crypto_gcm_decrypt(struct aead_request *req) |
| 377 | { | 561 | { |
| 378 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 562 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 379 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 563 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
| 380 | struct ablkcipher_request *abreq = &pctx->abreq; | 564 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
| 381 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 565 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
| 382 | unsigned int cryptlen = req->cryptlen; | ||
| 383 | unsigned int authsize = crypto_aead_authsize(aead); | 566 | unsigned int authsize = crypto_aead_authsize(aead); |
| 567 | unsigned int cryptlen = req->cryptlen; | ||
| 384 | int err; | 568 | int err; |
| 385 | 569 | ||
| 386 | if (cryptlen < authsize) | 570 | if (cryptlen < authsize) |
| 387 | return -EINVAL; | 571 | return -EINVAL; |
| 388 | cryptlen -= authsize; | 572 | cryptlen -= authsize; |
| 389 | 573 | ||
| 390 | crypto_gcm_init_crypt(abreq, req, cryptlen); | 574 | gctx->src = req->src; |
| 391 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 575 | gctx->cryptlen = cryptlen; |
| 392 | crypto_gcm_decrypt_done, req); | 576 | gctx->complete = gcm_dec_hash_done; |
| 393 | 577 | ||
| 394 | crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); | 578 | err = gcm_hash(req, pctx); |
| 579 | if (err) | ||
| 580 | return err; | ||
| 395 | 581 | ||
| 582 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 583 | gcm_decrypt_done, req); | ||
| 584 | crypto_gcm_init_crypt(abreq, req, cryptlen); | ||
| 396 | err = crypto_ablkcipher_decrypt(abreq); | 585 | err = crypto_ablkcipher_decrypt(abreq); |
| 397 | if (err) | 586 | if (err) |
| 398 | return err; | 587 | return err; |
| 399 | 588 | ||
| 400 | return crypto_gcm_verify(req); | 589 | return crypto_gcm_verify(req, pctx); |
| 401 | } | 590 | } |
| 402 | 591 | ||
| 403 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | 592 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) |
| @@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | |||
| 406 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); | 595 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); |
| 407 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 596 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
| 408 | struct crypto_ablkcipher *ctr; | 597 | struct crypto_ablkcipher *ctr; |
| 598 | struct crypto_ahash *ghash; | ||
| 409 | unsigned long align; | 599 | unsigned long align; |
| 410 | int err; | 600 | int err; |
| 411 | 601 | ||
| 602 | ghash = crypto_spawn_ahash(&ictx->ghash); | ||
| 603 | if (IS_ERR(ghash)) | ||
| 604 | return PTR_ERR(ghash); | ||
| 605 | |||
| 412 | ctr = crypto_spawn_skcipher(&ictx->ctr); | 606 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
| 413 | err = PTR_ERR(ctr); | 607 | err = PTR_ERR(ctr); |
| 414 | if (IS_ERR(ctr)) | 608 | if (IS_ERR(ctr)) |
| 415 | return err; | 609 | goto err_free_hash; |
| 416 | 610 | ||
| 417 | ctx->ctr = ctr; | 611 | ctx->ctr = ctr; |
| 418 | ctx->gf128 = NULL; | 612 | ctx->ghash = ghash; |
| 419 | 613 | ||
| 420 | align = crypto_tfm_alg_alignmask(tfm); | 614 | align = crypto_tfm_alg_alignmask(tfm); |
| 421 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 615 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
| 422 | tfm->crt_aead.reqsize = align + | 616 | tfm->crt_aead.reqsize = align + |
| 423 | sizeof(struct crypto_gcm_req_priv_ctx) + | 617 | offsetof(struct crypto_gcm_req_priv_ctx, u) + |
| 424 | crypto_ablkcipher_reqsize(ctr); | 618 | max(sizeof(struct ablkcipher_request) + |
| 619 | crypto_ablkcipher_reqsize(ctr), | ||
| 620 | sizeof(struct ahash_request) + | ||
| 621 | crypto_ahash_reqsize(ghash)); | ||
| 425 | 622 | ||
| 426 | return 0; | 623 | return 0; |
| 624 | |||
| 625 | err_free_hash: | ||
| 626 | crypto_free_ahash(ghash); | ||
| 627 | return err; | ||
| 427 | } | 628 | } |
| 428 | 629 | ||
| 429 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) | 630 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) |
| 430 | { | 631 | { |
| 431 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 632 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
| 432 | 633 | ||
| 433 | if (ctx->gf128 != NULL) | 634 | crypto_free_ahash(ctx->ghash); |
| 434 | gf128mul_free_4k(ctx->gf128); | ||
| 435 | |||
| 436 | crypto_free_ablkcipher(ctx->ctr); | 635 | crypto_free_ablkcipher(ctx->ctr); |
| 437 | } | 636 | } |
| 438 | 637 | ||
| 439 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | 638 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, |
| 440 | const char *full_name, | 639 | const char *full_name, |
| 441 | const char *ctr_name) | 640 | const char *ctr_name, |
| 641 | const char *ghash_name) | ||
| 442 | { | 642 | { |
| 443 | struct crypto_attr_type *algt; | 643 | struct crypto_attr_type *algt; |
| 444 | struct crypto_instance *inst; | 644 | struct crypto_instance *inst; |
| 445 | struct crypto_alg *ctr; | 645 | struct crypto_alg *ctr; |
| 646 | struct crypto_alg *ghash_alg; | ||
| 647 | struct ahash_alg *ghash_ahash_alg; | ||
| 446 | struct gcm_instance_ctx *ctx; | 648 | struct gcm_instance_ctx *ctx; |
| 447 | int err; | 649 | int err; |
| 448 | 650 | ||
| @@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
| 454 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 656 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 455 | return ERR_PTR(-EINVAL); | 657 | return ERR_PTR(-EINVAL); |
| 456 | 658 | ||
| 659 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, | ||
| 660 | CRYPTO_ALG_TYPE_HASH, | ||
| 661 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
| 662 | err = PTR_ERR(ghash_alg); | ||
| 663 | if (IS_ERR(ghash_alg)) | ||
| 664 | return ERR_PTR(err); | ||
| 665 | |||
| 666 | err = -ENOMEM; | ||
| 457 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 667 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
| 458 | if (!inst) | 668 | if (!inst) |
| 459 | return ERR_PTR(-ENOMEM); | 669 | goto out_put_ghash; |
| 460 | 670 | ||
| 461 | ctx = crypto_instance_ctx(inst); | 671 | ctx = crypto_instance_ctx(inst); |
| 672 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); | ||
| 673 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, | ||
| 674 | inst); | ||
| 675 | if (err) | ||
| 676 | goto err_free_inst; | ||
| 677 | |||
| 462 | crypto_set_skcipher_spawn(&ctx->ctr, inst); | 678 | crypto_set_skcipher_spawn(&ctx->ctr, inst); |
| 463 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, | 679 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
| 464 | crypto_requires_sync(algt->type, | 680 | crypto_requires_sync(algt->type, |
| 465 | algt->mask)); | 681 | algt->mask)); |
| 466 | if (err) | 682 | if (err) |
| 467 | goto err_free_inst; | 683 | goto err_drop_ghash; |
| 468 | 684 | ||
| 469 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); | 685 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); |
| 470 | 686 | ||
| @@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
| 479 | 695 | ||
| 480 | err = -ENAMETOOLONG; | 696 | err = -ENAMETOOLONG; |
| 481 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 697 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 482 | "gcm_base(%s)", ctr->cra_driver_name) >= | 698 | "gcm_base(%s,%s)", ctr->cra_driver_name, |
| 699 | ghash_alg->cra_driver_name) >= | ||
| 483 | CRYPTO_MAX_ALG_NAME) | 700 | CRYPTO_MAX_ALG_NAME) |
| 484 | goto out_put_ctr; | 701 | goto out_put_ctr; |
| 485 | 702 | ||
| @@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
| 502 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; | 719 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; |
| 503 | 720 | ||
| 504 | out: | 721 | out: |
| 722 | crypto_mod_put(ghash_alg); | ||
| 505 | return inst; | 723 | return inst; |
| 506 | 724 | ||
| 507 | out_put_ctr: | 725 | out_put_ctr: |
| 508 | crypto_drop_skcipher(&ctx->ctr); | 726 | crypto_drop_skcipher(&ctx->ctr); |
| 727 | err_drop_ghash: | ||
| 728 | crypto_drop_ahash(&ctx->ghash); | ||
| 509 | err_free_inst: | 729 | err_free_inst: |
| 510 | kfree(inst); | 730 | kfree(inst); |
| 731 | out_put_ghash: | ||
| 511 | inst = ERR_PTR(err); | 732 | inst = ERR_PTR(err); |
| 512 | goto out; | 733 | goto out; |
| 513 | } | 734 | } |
| @@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | |||
| 532 | CRYPTO_MAX_ALG_NAME) | 753 | CRYPTO_MAX_ALG_NAME) |
| 533 | return ERR_PTR(-ENAMETOOLONG); | 754 | return ERR_PTR(-ENAMETOOLONG); |
| 534 | 755 | ||
| 535 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 756 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); |
| 536 | } | 757 | } |
| 537 | 758 | ||
| 538 | static void crypto_gcm_free(struct crypto_instance *inst) | 759 | static void crypto_gcm_free(struct crypto_instance *inst) |
| @@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst) | |||
| 540 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); | 761 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); |
| 541 | 762 | ||
| 542 | crypto_drop_skcipher(&ctx->ctr); | 763 | crypto_drop_skcipher(&ctx->ctr); |
| 764 | crypto_drop_ahash(&ctx->ghash); | ||
| 543 | kfree(inst); | 765 | kfree(inst); |
| 544 | } | 766 | } |
| 545 | 767 | ||
| @@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
| 554 | { | 776 | { |
| 555 | int err; | 777 | int err; |
| 556 | const char *ctr_name; | 778 | const char *ctr_name; |
| 779 | const char *ghash_name; | ||
| 557 | char full_name[CRYPTO_MAX_ALG_NAME]; | 780 | char full_name[CRYPTO_MAX_ALG_NAME]; |
| 558 | 781 | ||
| 559 | ctr_name = crypto_attr_alg_name(tb[1]); | 782 | ctr_name = crypto_attr_alg_name(tb[1]); |
| @@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
| 561 | if (IS_ERR(ctr_name)) | 784 | if (IS_ERR(ctr_name)) |
| 562 | return ERR_PTR(err); | 785 | return ERR_PTR(err); |
| 563 | 786 | ||
| 564 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", | 787 | ghash_name = crypto_attr_alg_name(tb[2]); |
| 565 | ctr_name) >= CRYPTO_MAX_ALG_NAME) | 788 | err = PTR_ERR(ghash_name); |
| 789 | if (IS_ERR(ghash_name)) | ||
| 790 | return ERR_PTR(err); | ||
| 791 | |||
| 792 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", | ||
| 793 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) | ||
| 566 | return ERR_PTR(-ENAMETOOLONG); | 794 | return ERR_PTR(-ENAMETOOLONG); |
| 567 | 795 | ||
| 568 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 796 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); |
| 569 | } | 797 | } |
| 570 | 798 | ||
| 571 | static struct crypto_template crypto_gcm_base_tmpl = { | 799 | static struct crypto_template crypto_gcm_base_tmpl = { |
| @@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void) | |||
| 784 | { | 1012 | { |
| 785 | int err; | 1013 | int err; |
| 786 | 1014 | ||
| 1015 | gcm_zeroes = kzalloc(16, GFP_KERNEL); | ||
| 1016 | if (!gcm_zeroes) | ||
| 1017 | return -ENOMEM; | ||
| 1018 | |||
| 787 | err = crypto_register_template(&crypto_gcm_base_tmpl); | 1019 | err = crypto_register_template(&crypto_gcm_base_tmpl); |
| 788 | if (err) | 1020 | if (err) |
| 789 | goto out; | 1021 | goto out; |
| @@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void) | |||
| 796 | if (err) | 1028 | if (err) |
| 797 | goto out_undo_gcm; | 1029 | goto out_undo_gcm; |
| 798 | 1030 | ||
| 799 | out: | 1031 | return 0; |
| 800 | return err; | ||
| 801 | 1032 | ||
| 802 | out_undo_gcm: | 1033 | out_undo_gcm: |
| 803 | crypto_unregister_template(&crypto_gcm_tmpl); | 1034 | crypto_unregister_template(&crypto_gcm_tmpl); |
| 804 | out_undo_base: | 1035 | out_undo_base: |
| 805 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1036 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
| 806 | goto out; | 1037 | out: |
| 1038 | kfree(gcm_zeroes); | ||
| 1039 | return err; | ||
| 807 | } | 1040 | } |
| 808 | 1041 | ||
| 809 | static void __exit crypto_gcm_module_exit(void) | 1042 | static void __exit crypto_gcm_module_exit(void) |
| 810 | { | 1043 | { |
| 1044 | kfree(gcm_zeroes); | ||
| 811 | crypto_unregister_template(&crypto_rfc4106_tmpl); | 1045 | crypto_unregister_template(&crypto_rfc4106_tmpl); |
| 812 | crypto_unregister_template(&crypto_gcm_tmpl); | 1046 | crypto_unregister_template(&crypto_gcm_tmpl); |
| 813 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1047 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
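With the gcm.c hunks above, the gcm/gcm_base templates spawn a GHASH ahash alongside the CTR skcipher, size the per-request context to the larger of the two sub-requests, and free gcm_zeroes on both the error and exit paths. The following is a minimal, hedged caller-side sketch, not part of the patch: it assumes the standard kernel AEAD helpers and elides key setup and request handling.

#include <linux/crypto.h>
#include <linux/err.h>

static int gcm_alloc_example(void)
{
	struct crypto_aead *tfm;

	/* Either spelling reaches the same construction; the explicit
	 * gcm_base form now names both the CTR and GHASH implementations. */
	tfm = crypto_alloc_aead("gcm_base(ctr(aes),ghash)", 0, 0);
	if (IS_ERR(tfm))
		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* crypto_aead_setkey(), crypto_aead_setauthsize() and the
	 * aead_request setup would follow here. */
	crypto_free_aead(tfm);
	return 0;
}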
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c new file mode 100644 index 000000000000..be4425616931 --- /dev/null +++ b/crypto/ghash-generic.c | |||
| @@ -0,0 +1,170 @@ | |||
| 1 | /* | ||
| 2 | * GHASH: digest algorithm for GCM (Galois/Counter Mode). | ||
| 3 | * | ||
| 4 | * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> | ||
| 5 | * Copyright (c) 2009 Intel Corp. | ||
| 6 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 7 | * | ||
| 8 | * The algorithm implementation is copied from gcm.c. | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms of the GNU General Public License version 2 as published | ||
| 12 | * by the Free Software Foundation. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <crypto/algapi.h> | ||
| 16 | #include <crypto/gf128mul.h> | ||
| 17 | #include <crypto/internal/hash.h> | ||
| 18 | #include <linux/crypto.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/kernel.h> | ||
| 21 | #include <linux/module.h> | ||
| 22 | |||
| 23 | #define GHASH_BLOCK_SIZE 16 | ||
| 24 | #define GHASH_DIGEST_SIZE 16 | ||
| 25 | |||
| 26 | struct ghash_ctx { | ||
| 27 | struct gf128mul_4k *gf128; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct ghash_desc_ctx { | ||
| 31 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
| 32 | u32 bytes; | ||
| 33 | }; | ||
| 34 | |||
| 35 | static int ghash_init(struct shash_desc *desc) | ||
| 36 | { | ||
| 37 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
| 38 | |||
| 39 | memset(dctx, 0, sizeof(*dctx)); | ||
| 40 | |||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | |||
| 44 | static int ghash_setkey(struct crypto_shash *tfm, | ||
| 45 | const u8 *key, unsigned int keylen) | ||
| 46 | { | ||
| 47 | struct ghash_ctx *ctx = crypto_shash_ctx(tfm); | ||
| 48 | |||
| 49 | if (keylen != GHASH_BLOCK_SIZE) { | ||
| 50 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 51 | return -EINVAL; | ||
| 52 | } | ||
| 53 | |||
| 54 | if (ctx->gf128) | ||
| 55 | gf128mul_free_4k(ctx->gf128); | ||
| 56 | ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); | ||
| 57 | if (!ctx->gf128) | ||
| 58 | return -ENOMEM; | ||
| 59 | |||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | static int ghash_update(struct shash_desc *desc, | ||
| 64 | const u8 *src, unsigned int srclen) | ||
| 65 | { | ||
| 66 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
| 67 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
| 68 | u8 *dst = dctx->buffer; | ||
| 69 | |||
| 70 | if (dctx->bytes) { | ||
| 71 | int n = min(srclen, dctx->bytes); | ||
| 72 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
| 73 | |||
| 74 | dctx->bytes -= n; | ||
| 75 | srclen -= n; | ||
| 76 | |||
| 77 | while (n--) | ||
| 78 | *pos++ ^= *src++; | ||
| 79 | |||
| 80 | if (!dctx->bytes) | ||
| 81 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 82 | } | ||
| 83 | |||
| 84 | while (srclen >= GHASH_BLOCK_SIZE) { | ||
| 85 | crypto_xor(dst, src, GHASH_BLOCK_SIZE); | ||
| 86 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 87 | src += GHASH_BLOCK_SIZE; | ||
| 88 | srclen -= GHASH_BLOCK_SIZE; | ||
| 89 | } | ||
| 90 | |||
| 91 | if (srclen) { | ||
| 92 | dctx->bytes = GHASH_BLOCK_SIZE - srclen; | ||
| 93 | while (srclen--) | ||
| 94 | *dst++ ^= *src++; | ||
| 95 | } | ||
| 96 | |||
| 97 | return 0; | ||
| 98 | } | ||
| 99 | |||
| 100 | static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) | ||
| 101 | { | ||
| 102 | u8 *dst = dctx->buffer; | ||
| 103 | |||
| 104 | if (dctx->bytes) { | ||
| 105 | u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
| 106 | |||
| 107 | while (dctx->bytes--) | ||
| 108 | *tmp++ ^= 0; | ||
| 109 | |||
| 110 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
| 111 | } | ||
| 112 | |||
| 113 | dctx->bytes = 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static int ghash_final(struct shash_desc *desc, u8 *dst) | ||
| 117 | { | ||
| 118 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
| 119 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
| 120 | u8 *buf = dctx->buffer; | ||
| 121 | |||
| 122 | ghash_flush(ctx, dctx); | ||
| 123 | memcpy(dst, buf, GHASH_BLOCK_SIZE); | ||
| 124 | |||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | |||
| 128 | static void ghash_exit_tfm(struct crypto_tfm *tfm) | ||
| 129 | { | ||
| 130 | struct ghash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 131 | if (ctx->gf128) | ||
| 132 | gf128mul_free_4k(ctx->gf128); | ||
| 133 | } | ||
| 134 | |||
| 135 | static struct shash_alg ghash_alg = { | ||
| 136 | .digestsize = GHASH_DIGEST_SIZE, | ||
| 137 | .init = ghash_init, | ||
| 138 | .update = ghash_update, | ||
| 139 | .final = ghash_final, | ||
| 140 | .setkey = ghash_setkey, | ||
| 141 | .descsize = sizeof(struct ghash_desc_ctx), | ||
| 142 | .base = { | ||
| 143 | .cra_name = "ghash", | ||
| 144 | .cra_driver_name = "ghash-generic", | ||
| 145 | .cra_priority = 100, | ||
| 146 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 147 | .cra_blocksize = GHASH_BLOCK_SIZE, | ||
| 148 | .cra_ctxsize = sizeof(struct ghash_ctx), | ||
| 149 | .cra_module = THIS_MODULE, | ||
| 150 | .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list), | ||
| 151 | .cra_exit = ghash_exit_tfm, | ||
| 152 | }, | ||
| 153 | }; | ||
| 154 | |||
| 155 | static int __init ghash_mod_init(void) | ||
| 156 | { | ||
| 157 | return crypto_register_shash(&ghash_alg); | ||
| 158 | } | ||
| 159 | |||
| 160 | static void __exit ghash_mod_exit(void) | ||
| 161 | { | ||
| 162 | crypto_unregister_shash(&ghash_alg); | ||
| 163 | } | ||
| 164 | |||
| 165 | module_init(ghash_mod_init); | ||
| 166 | module_exit(ghash_mod_exit); | ||
| 167 | |||
| 168 | MODULE_LICENSE("GPL"); | ||
| 169 | MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); | ||
| 170 | MODULE_ALIAS("ghash"); | ||
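The new ghash-generic module exposes GCM's universal hash as an ordinary synchronous hash. Below is a hedged sketch of driving it directly: the 16-byte key is the hash subkey H (in GCM, the block cipher applied to an all-zero block), the descriptor-on-stack pattern mirrors the one used in hmac.c later in this patch, and a trailing partial block is zero-padded by ghash_final, matching GCM's padding rule. Note that GHASH on its own is not a secure MAC; it is meant to be composed inside GCM.

#include <crypto/hash.h>
#include <linux/err.h>

static int ghash_digest_example(const u8 *h, const u8 *data, unsigned int len,
				u8 digest[16])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, h, 16);
	if (!err) {
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(tfm)];
		} desc;

		desc.shash.tfm = tfm;
		desc.shash.flags = 0;
		err = crypto_shash_digest(&desc.shash, data, len, digest);
	}

	crypto_free_shash(tfm);
	return err;
}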
diff --git a/crypto/hmac.c b/crypto/hmac.c index 0ad39c374963..15c2eb534541 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 28 | 28 | ||
| 29 | struct hmac_ctx { | 29 | struct hmac_ctx { |
| 30 | struct crypto_hash *child; | 30 | struct crypto_shash *hash; |
| 31 | }; | 31 | }; |
| 32 | 32 | ||
| 33 | static inline void *align_ptr(void *p, unsigned int align) | 33 | static inline void *align_ptr(void *p, unsigned int align) |
| @@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align) | |||
| 35 | return (void *)ALIGN((unsigned long)p, align); | 35 | return (void *)ALIGN((unsigned long)p, align); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) | 38 | static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) |
| 39 | { | 39 | { |
| 40 | return align_ptr(crypto_hash_ctx_aligned(tfm) + | 40 | return align_ptr(crypto_shash_ctx_aligned(tfm) + |
| 41 | crypto_hash_blocksize(tfm) * 2 + | 41 | crypto_shash_statesize(tfm) * 2, |
| 42 | crypto_hash_digestsize(tfm), sizeof(void *)); | 42 | crypto_tfm_ctx_alignment()); |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | static int hmac_setkey(struct crypto_hash *parent, | 45 | static int hmac_setkey(struct crypto_shash *parent, |
| 46 | const u8 *inkey, unsigned int keylen) | 46 | const u8 *inkey, unsigned int keylen) |
| 47 | { | 47 | { |
| 48 | int bs = crypto_hash_blocksize(parent); | 48 | int bs = crypto_shash_blocksize(parent); |
| 49 | int ds = crypto_hash_digestsize(parent); | 49 | int ds = crypto_shash_digestsize(parent); |
| 50 | char *ipad = crypto_hash_ctx_aligned(parent); | 50 | int ss = crypto_shash_statesize(parent); |
| 51 | char *opad = ipad + bs; | 51 | char *ipad = crypto_shash_ctx_aligned(parent); |
| 52 | char *digest = opad + bs; | 52 | char *opad = ipad + ss; |
| 53 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | 53 | struct hmac_ctx *ctx = align_ptr(opad + ss, |
| 54 | struct crypto_hash *tfm = ctx->child; | 54 | crypto_tfm_ctx_alignment()); |
| 55 | struct crypto_shash *hash = ctx->hash; | ||
| 56 | struct { | ||
| 57 | struct shash_desc shash; | ||
| 58 | char ctx[crypto_shash_descsize(hash)]; | ||
| 59 | } desc; | ||
| 55 | unsigned int i; | 60 | unsigned int i; |
| 56 | 61 | ||
| 62 | desc.shash.tfm = hash; | ||
| 63 | desc.shash.flags = crypto_shash_get_flags(parent) & | ||
| 64 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 65 | |||
| 57 | if (keylen > bs) { | 66 | if (keylen > bs) { |
| 58 | struct hash_desc desc; | ||
| 59 | struct scatterlist tmp; | ||
| 60 | int tmplen; | ||
| 61 | int err; | 67 | int err; |
| 62 | 68 | ||
| 63 | desc.tfm = tfm; | 69 | err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad); |
| 64 | desc.flags = crypto_hash_get_flags(parent); | ||
| 65 | desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 66 | |||
| 67 | err = crypto_hash_init(&desc); | ||
| 68 | if (err) | 70 | if (err) |
| 69 | return err; | 71 | return err; |
| 70 | 72 | ||
| 71 | tmplen = bs * 2 + ds; | ||
| 72 | sg_init_one(&tmp, ipad, tmplen); | ||
| 73 | |||
| 74 | for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) { | ||
| 75 | memcpy(ipad, inkey, tmplen); | ||
| 76 | err = crypto_hash_update(&desc, &tmp, tmplen); | ||
| 77 | if (err) | ||
| 78 | return err; | ||
| 79 | } | ||
| 80 | |||
| 81 | if (keylen) { | ||
| 82 | memcpy(ipad, inkey, keylen); | ||
| 83 | err = crypto_hash_update(&desc, &tmp, keylen); | ||
| 84 | if (err) | ||
| 85 | return err; | ||
| 86 | } | ||
| 87 | |||
| 88 | err = crypto_hash_final(&desc, digest); | ||
| 89 | if (err) | ||
| 90 | return err; | ||
| 91 | |||
| 92 | inkey = digest; | ||
| 93 | keylen = ds; | 73 | keylen = ds; |
| 94 | } | 74 | } else |
| 75 | memcpy(ipad, inkey, keylen); | ||
| 95 | 76 | ||
| 96 | memcpy(ipad, inkey, keylen); | ||
| 97 | memset(ipad + keylen, 0, bs - keylen); | 77 | memset(ipad + keylen, 0, bs - keylen); |
| 98 | memcpy(opad, ipad, bs); | 78 | memcpy(opad, ipad, bs); |
| 99 | 79 | ||
| @@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent, | |||
| 102 | opad[i] ^= 0x5c; | 82 | opad[i] ^= 0x5c; |
| 103 | } | 83 | } |
| 104 | 84 | ||
| 105 | return 0; | 85 | return crypto_shash_init(&desc.shash) ?: |
| 86 | crypto_shash_update(&desc.shash, ipad, bs) ?: | ||
| 87 | crypto_shash_export(&desc.shash, ipad) ?: | ||
| 88 | crypto_shash_init(&desc.shash) ?: | ||
| 89 | crypto_shash_update(&desc.shash, opad, bs) ?: | ||
| 90 | crypto_shash_export(&desc.shash, opad); | ||
| 106 | } | 91 | } |
| 107 | 92 | ||
| 108 | static int hmac_init(struct hash_desc *pdesc) | 93 | static int hmac_export(struct shash_desc *pdesc, void *out) |
| 109 | { | 94 | { |
| 110 | struct crypto_hash *parent = pdesc->tfm; | 95 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
| 111 | int bs = crypto_hash_blocksize(parent); | ||
| 112 | int ds = crypto_hash_digestsize(parent); | ||
| 113 | char *ipad = crypto_hash_ctx_aligned(parent); | ||
| 114 | struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); | ||
| 115 | struct hash_desc desc; | ||
| 116 | struct scatterlist tmp; | ||
| 117 | int err; | ||
| 118 | 96 | ||
| 119 | desc.tfm = ctx->child; | 97 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 120 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 121 | sg_init_one(&tmp, ipad, bs); | ||
| 122 | 98 | ||
| 123 | err = crypto_hash_init(&desc); | 99 | return crypto_shash_export(desc, out); |
| 124 | if (unlikely(err)) | ||
| 125 | return err; | ||
| 126 | |||
| 127 | return crypto_hash_update(&desc, &tmp, bs); | ||
| 128 | } | 100 | } |
| 129 | 101 | ||
| 130 | static int hmac_update(struct hash_desc *pdesc, | 102 | static int hmac_import(struct shash_desc *pdesc, const void *in) |
| 131 | struct scatterlist *sg, unsigned int nbytes) | ||
| 132 | { | 103 | { |
| 104 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
| 133 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); | 105 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); |
| 134 | struct hash_desc desc; | ||
| 135 | 106 | ||
| 136 | desc.tfm = ctx->child; | 107 | desc->tfm = ctx->hash; |
| 137 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 108 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 138 | 109 | ||
| 139 | return crypto_hash_update(&desc, sg, nbytes); | 110 | return crypto_shash_import(desc, in); |
| 140 | } | 111 | } |
| 141 | 112 | ||
| 142 | static int hmac_final(struct hash_desc *pdesc, u8 *out) | 113 | static int hmac_init(struct shash_desc *pdesc) |
| 143 | { | 114 | { |
| 144 | struct crypto_hash *parent = pdesc->tfm; | 115 | return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); |
| 145 | int bs = crypto_hash_blocksize(parent); | 116 | } |
| 146 | int ds = crypto_hash_digestsize(parent); | ||
| 147 | char *opad = crypto_hash_ctx_aligned(parent) + bs; | ||
| 148 | char *digest = opad + bs; | ||
| 149 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
| 150 | struct hash_desc desc; | ||
| 151 | struct scatterlist tmp; | ||
| 152 | int err; | ||
| 153 | 117 | ||
| 154 | desc.tfm = ctx->child; | 118 | static int hmac_update(struct shash_desc *pdesc, |
| 155 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 119 | const u8 *data, unsigned int nbytes) |
| 156 | sg_init_one(&tmp, opad, bs + ds); | 120 | { |
| 121 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
| 157 | 122 | ||
| 158 | err = crypto_hash_final(&desc, digest); | 123 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 159 | if (unlikely(err)) | ||
| 160 | return err; | ||
| 161 | 124 | ||
| 162 | return crypto_hash_digest(&desc, &tmp, bs + ds, out); | 125 | return crypto_shash_update(desc, data, nbytes); |
| 163 | } | 126 | } |
| 164 | 127 | ||
| 165 | static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, | 128 | static int hmac_final(struct shash_desc *pdesc, u8 *out) |
| 166 | unsigned int nbytes, u8 *out) | ||
| 167 | { | 129 | { |
| 168 | struct crypto_hash *parent = pdesc->tfm; | 130 | struct crypto_shash *parent = pdesc->tfm; |
| 169 | int bs = crypto_hash_blocksize(parent); | 131 | int ds = crypto_shash_digestsize(parent); |
| 170 | int ds = crypto_hash_digestsize(parent); | 132 | int ss = crypto_shash_statesize(parent); |
| 171 | char *ipad = crypto_hash_ctx_aligned(parent); | 133 | char *opad = crypto_shash_ctx_aligned(parent) + ss; |
| 172 | char *opad = ipad + bs; | 134 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
| 173 | char *digest = opad + bs; | ||
| 174 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
| 175 | struct hash_desc desc; | ||
| 176 | struct scatterlist sg1[2]; | ||
| 177 | struct scatterlist sg2[1]; | ||
| 178 | int err; | ||
| 179 | 135 | ||
| 180 | desc.tfm = ctx->child; | 136 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 181 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 182 | 137 | ||
| 183 | sg_init_table(sg1, 2); | 138 | return crypto_shash_final(desc, out) ?: |
| 184 | sg_set_buf(sg1, ipad, bs); | 139 | crypto_shash_import(desc, opad) ?: |
| 185 | scatterwalk_sg_chain(sg1, 2, sg); | 140 | crypto_shash_finup(desc, out, ds, out); |
| 141 | } | ||
| 186 | 142 | ||
| 187 | sg_init_table(sg2, 1); | 143 | static int hmac_finup(struct shash_desc *pdesc, const u8 *data, |
| 188 | sg_set_buf(sg2, opad, bs + ds); | 144 | unsigned int nbytes, u8 *out) |
| 145 | { | ||
| 189 | 146 | ||
| 190 | err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); | 147 | struct crypto_shash *parent = pdesc->tfm; |
| 191 | if (unlikely(err)) | 148 | int ds = crypto_shash_digestsize(parent); |
| 192 | return err; | 149 | int ss = crypto_shash_statesize(parent); |
| 150 | char *opad = crypto_shash_ctx_aligned(parent) + ss; | ||
| 151 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
| 193 | 152 | ||
| 194 | return crypto_hash_digest(&desc, sg2, bs + ds, out); | 153 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 154 | |||
| 155 | return crypto_shash_finup(desc, data, nbytes, out) ?: | ||
| 156 | crypto_shash_import(desc, opad) ?: | ||
| 157 | crypto_shash_finup(desc, out, ds, out); | ||
| 195 | } | 158 | } |
| 196 | 159 | ||
| 197 | static int hmac_init_tfm(struct crypto_tfm *tfm) | 160 | static int hmac_init_tfm(struct crypto_tfm *tfm) |
| 198 | { | 161 | { |
| 199 | struct crypto_hash *hash; | 162 | struct crypto_shash *parent = __crypto_shash_cast(tfm); |
| 163 | struct crypto_shash *hash; | ||
| 200 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 164 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
| 201 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 165 | struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); |
| 202 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 166 | struct hmac_ctx *ctx = hmac_ctx(parent); |
| 203 | 167 | ||
| 204 | hash = crypto_spawn_hash(spawn); | 168 | hash = crypto_spawn_shash(spawn); |
| 205 | if (IS_ERR(hash)) | 169 | if (IS_ERR(hash)) |
| 206 | return PTR_ERR(hash); | 170 | return PTR_ERR(hash); |
| 207 | 171 | ||
| 208 | ctx->child = hash; | 172 | parent->descsize = sizeof(struct shash_desc) + |
| 173 | crypto_shash_descsize(hash); | ||
| 174 | |||
| 175 | ctx->hash = hash; | ||
| 209 | return 0; | 176 | return 0; |
| 210 | } | 177 | } |
| 211 | 178 | ||
| 212 | static void hmac_exit_tfm(struct crypto_tfm *tfm) | 179 | static void hmac_exit_tfm(struct crypto_tfm *tfm) |
| 213 | { | 180 | { |
| 214 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 181 | struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); |
| 215 | crypto_free_hash(ctx->child); | 182 | crypto_free_shash(ctx->hash); |
| 216 | } | 183 | } |
| 217 | 184 | ||
| 218 | static void hmac_free(struct crypto_instance *inst) | 185 | static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 219 | { | 186 | { |
| 220 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 187 | struct shash_instance *inst; |
| 221 | kfree(inst); | ||
| 222 | } | ||
| 223 | |||
| 224 | static struct crypto_instance *hmac_alloc(struct rtattr **tb) | ||
| 225 | { | ||
| 226 | struct crypto_instance *inst; | ||
| 227 | struct crypto_alg *alg; | 188 | struct crypto_alg *alg; |
| 189 | struct shash_alg *salg; | ||
| 228 | int err; | 190 | int err; |
| 229 | int ds; | 191 | int ds; |
| 192 | int ss; | ||
| 230 | 193 | ||
| 231 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 194 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
| 232 | if (err) | 195 | if (err) |
| 233 | return ERR_PTR(err); | 196 | return err; |
| 234 | 197 | ||
| 235 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 198 | salg = shash_attr_alg(tb[1], 0, 0); |
| 236 | CRYPTO_ALG_TYPE_HASH_MASK); | 199 | if (IS_ERR(salg)) |
| 237 | if (IS_ERR(alg)) | 200 | return PTR_ERR(salg); |
| 238 | return ERR_CAST(alg); | 201 | |
| 239 | 202 | err = -EINVAL; | |
| 240 | inst = ERR_PTR(-EINVAL); | 203 | ds = salg->digestsize; |
| 241 | ds = alg->cra_type == &crypto_hash_type ? | 204 | ss = salg->statesize; |
| 242 | alg->cra_hash.digestsize : | 205 | alg = &salg->base; |
| 243 | alg->cra_type ? | 206 | if (ds > alg->cra_blocksize || |
| 244 | __crypto_shash_alg(alg)->digestsize : | 207 | ss < alg->cra_blocksize) |
| 245 | alg->cra_digest.dia_digestsize; | ||
| 246 | if (ds > alg->cra_blocksize) | ||
| 247 | goto out_put_alg; | 208 | goto out_put_alg; |
| 248 | 209 | ||
| 249 | inst = crypto_alloc_instance("hmac", alg); | 210 | inst = shash_alloc_instance("hmac", alg); |
| 211 | err = PTR_ERR(inst); | ||
| 250 | if (IS_ERR(inst)) | 212 | if (IS_ERR(inst)) |
| 251 | goto out_put_alg; | 213 | goto out_put_alg; |
| 252 | 214 | ||
| 253 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 215 | err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, |
| 254 | inst->alg.cra_priority = alg->cra_priority; | 216 | shash_crypto_instance(inst)); |
| 255 | inst->alg.cra_blocksize = alg->cra_blocksize; | 217 | if (err) |
| 256 | inst->alg.cra_alignmask = alg->cra_alignmask; | 218 | goto out_free_inst; |
| 257 | inst->alg.cra_type = &crypto_hash_type; | 219 | |
| 258 | 220 | inst->alg.base.cra_priority = alg->cra_priority; | |
| 259 | inst->alg.cra_hash.digestsize = ds; | 221 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
| 260 | 222 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | |
| 261 | inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + | 223 | |
| 262 | ALIGN(inst->alg.cra_blocksize * 2 + ds, | 224 | ss = ALIGN(ss, alg->cra_alignmask + 1); |
| 263 | sizeof(void *)); | 225 | inst->alg.digestsize = ds; |
| 264 | 226 | inst->alg.statesize = ss; | |
| 265 | inst->alg.cra_init = hmac_init_tfm; | 227 | |
| 266 | inst->alg.cra_exit = hmac_exit_tfm; | 228 | inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + |
| 267 | 229 | ALIGN(ss * 2, crypto_tfm_ctx_alignment()); | |
| 268 | inst->alg.cra_hash.init = hmac_init; | 230 | |
| 269 | inst->alg.cra_hash.update = hmac_update; | 231 | inst->alg.base.cra_init = hmac_init_tfm; |
| 270 | inst->alg.cra_hash.final = hmac_final; | 232 | inst->alg.base.cra_exit = hmac_exit_tfm; |
| 271 | inst->alg.cra_hash.digest = hmac_digest; | 233 | |
| 272 | inst->alg.cra_hash.setkey = hmac_setkey; | 234 | inst->alg.init = hmac_init; |
| 235 | inst->alg.update = hmac_update; | ||
| 236 | inst->alg.final = hmac_final; | ||
| 237 | inst->alg.finup = hmac_finup; | ||
| 238 | inst->alg.export = hmac_export; | ||
| 239 | inst->alg.import = hmac_import; | ||
| 240 | inst->alg.setkey = hmac_setkey; | ||
| 241 | |||
| 242 | err = shash_register_instance(tmpl, inst); | ||
| 243 | if (err) { | ||
| 244 | out_free_inst: | ||
| 245 | shash_free_instance(shash_crypto_instance(inst)); | ||
| 246 | } | ||
| 273 | 247 | ||
| 274 | out_put_alg: | 248 | out_put_alg: |
| 275 | crypto_mod_put(alg); | 249 | crypto_mod_put(alg); |
| 276 | return inst; | 250 | return err; |
| 277 | } | 251 | } |
| 278 | 252 | ||
| 279 | static struct crypto_template hmac_tmpl = { | 253 | static struct crypto_template hmac_tmpl = { |
| 280 | .name = "hmac", | 254 | .name = "hmac", |
| 281 | .alloc = hmac_alloc, | 255 | .create = hmac_create, |
| 282 | .free = hmac_free, | 256 | .free = shash_free_instance, |
| 283 | .module = THIS_MODULE, | 257 | .module = THIS_MODULE, |
| 284 | }; | 258 | }; |
| 285 | 259 | ||
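The rewritten hmac template is built on shash and caches partial states instead of raw pads: setkey hashes one block of K xor ipad and one block of K xor opad, exports the two intermediate states into the tfm context, and hmac_init/hmac_final simply import them, so computing HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)) no longer recompresses a pad block on every message. A hedged caller-side sketch follows; the tfm is assumed to come from crypto_alloc_shash("hmac(sha1)", 0, 0) and mac must hold the 20-byte SHA-1 digest.

#include <crypto/hash.h>

static int hmac_sha1_example(struct crypto_shash *hmac_tfm,
			     const u8 *key, unsigned int keylen,
			     const u8 *hdr, unsigned int hdrlen,
			     const u8 *payload, unsigned int plen, u8 *mac)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(hmac_tfm)];
	} desc;
	int err;

	err = crypto_shash_setkey(hmac_tfm, key, keylen);
	if (err)
		return err;

	desc.shash.tfm = hmac_tfm;
	desc.shash.flags = 0;

	/* init imports the cached ipad state rather than hashing a pad
	 * block; final/finup later imports the cached opad state. */
	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, hdr, hdrlen) ?:
	       crypto_shash_finup(&desc.shash, payload, plen, mac);
}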
diff --git a/crypto/internal.h b/crypto/internal.h index 113579a82dff..2d226362e594 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
| @@ -25,12 +25,7 @@ | |||
| 25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
| 26 | #include <linux/rwsem.h> | 26 | #include <linux/rwsem.h> |
| 27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 28 | 28 | #include <linux/fips.h> | |
| 29 | #ifdef CONFIG_CRYPTO_FIPS | ||
| 30 | extern int fips_enabled; | ||
| 31 | #else | ||
| 32 | #define fips_enabled 0 | ||
| 33 | #endif | ||
| 34 | 29 | ||
| 35 | /* Crypto notification events. */ | 30 | /* Crypto notification events. */ |
| 36 | enum { | 31 | enum { |
| @@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void) | |||
| 65 | { } | 60 | { } |
| 66 | #endif | 61 | #endif |
| 67 | 62 | ||
| 68 | static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg) | ||
| 69 | { | ||
| 70 | unsigned int len = alg->cra_ctxsize; | ||
| 71 | |||
| 72 | if (alg->cra_alignmask) { | ||
| 73 | len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); | ||
| 74 | len += alg->cra_digest.dia_digestsize; | ||
| 75 | } | ||
| 76 | |||
| 77 | return len; | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) | 63 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) |
| 81 | { | 64 | { |
| 82 | return alg->cra_ctxsize; | 65 | return alg->cra_ctxsize; |
| @@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); | |||
| 91 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); | 74 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); |
| 92 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | 75 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); |
| 93 | 76 | ||
| 94 | int crypto_init_digest_ops(struct crypto_tfm *tfm); | ||
| 95 | int crypto_init_digest_ops_async(struct crypto_tfm *tfm); | ||
| 96 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
| 97 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 78 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
| 98 | 79 | ||
| 99 | void crypto_exit_digest_ops(struct crypto_tfm *tfm); | ||
| 100 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | 80 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); |
| 101 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | 81 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); |
| 102 | 82 | ||
| @@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, | |||
| 111 | u32 mask); | 91 | u32 mask); |
| 112 | void *crypto_create_tfm(struct crypto_alg *alg, | 92 | void *crypto_create_tfm(struct crypto_alg *alg, |
| 113 | const struct crypto_type *frontend); | 93 | const struct crypto_type *frontend); |
| 94 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
| 95 | const struct crypto_type *frontend, | ||
| 96 | u32 type, u32 mask); | ||
| 114 | void *crypto_alloc_tfm(const char *alg_name, | 97 | void *crypto_alloc_tfm(const char *alg_name, |
| 115 | const struct crypto_type *frontend, u32 type, u32 mask); | 98 | const struct crypto_type *frontend, u32 type, u32 mask); |
| 116 | 99 | ||
| 117 | int crypto_register_instance(struct crypto_template *tmpl, | ||
| 118 | struct crypto_instance *inst); | ||
| 119 | |||
| 120 | int crypto_register_notifier(struct notifier_block *nb); | 100 | int crypto_register_notifier(struct notifier_block *nb); |
| 121 | int crypto_unregister_notifier(struct notifier_block *nb); | 101 | int crypto_unregister_notifier(struct notifier_block *nb); |
| 122 | int crypto_probing_notify(unsigned long val, void *v); | 102 | int crypto_probing_notify(unsigned long val, void *v); |
diff --git a/crypto/pcompress.c b/crypto/pcompress.c index bcadc03726b7..f7c4a7d7412e 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c | |||
| @@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
| 36 | return 0; | 36 | return 0; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, | 39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) |
| 40 | const struct crypto_type *frontend) | ||
| 41 | { | 40 | { |
| 42 | return alg->cra_ctxsize; | 41 | return alg->cra_ctxsize; |
| 43 | } | 42 | } |
| 44 | 43 | ||
| 45 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, | 44 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) |
| 46 | const struct crypto_type *frontend) | ||
| 47 | { | 45 | { |
| 48 | return 0; | 46 | return 0; |
| 49 | } | 47 | } |
diff --git a/crypto/rng.c b/crypto/rng.c index 6e94bc735578..ba05e7380e76 100644 --- a/crypto/rng.c +++ b/crypto/rng.c | |||
| @@ -123,4 +123,4 @@ void crypto_put_default_rng(void) | |||
| 123 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); | 123 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); |
| 124 | 124 | ||
| 125 | MODULE_LICENSE("GPL"); | 125 | MODULE_LICENSE("GPL"); |
| 126 | MODULE_DESCRIPTION("Random Number Genertor"); | 126 | MODULE_DESCRIPTION("Random Number Generator"); |
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 9efef20454cb..0416091bf45a 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c | |||
| @@ -25,31 +25,21 @@ | |||
| 25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
| 26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
| 27 | 27 | ||
| 28 | struct sha1_ctx { | ||
| 29 | u64 count; | ||
| 30 | u32 state[5]; | ||
| 31 | u8 buffer[64]; | ||
| 32 | }; | ||
| 33 | |||
| 34 | static int sha1_init(struct shash_desc *desc) | 28 | static int sha1_init(struct shash_desc *desc) |
| 35 | { | 29 | { |
| 36 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 30 | struct sha1_state *sctx = shash_desc_ctx(desc); |
| 37 | 31 | ||
| 38 | static const struct sha1_ctx initstate = { | 32 | *sctx = (struct sha1_state){ |
| 39 | 0, | 33 | .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, |
| 40 | { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, | ||
| 41 | { 0, } | ||
| 42 | }; | 34 | }; |
| 43 | 35 | ||
| 44 | *sctx = initstate; | ||
| 45 | |||
| 46 | return 0; | 36 | return 0; |
| 47 | } | 37 | } |
| 48 | 38 | ||
| 49 | static int sha1_update(struct shash_desc *desc, const u8 *data, | 39 | static int sha1_update(struct shash_desc *desc, const u8 *data, |
| 50 | unsigned int len) | 40 | unsigned int len) |
| 51 | { | 41 | { |
| 52 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 42 | struct sha1_state *sctx = shash_desc_ctx(desc); |
| 53 | unsigned int partial, done; | 43 | unsigned int partial, done; |
| 54 | const u8 *src; | 44 | const u8 *src; |
| 55 | 45 | ||
| @@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, | |||
| 85 | /* Add padding and return the message digest. */ | 75 | /* Add padding and return the message digest. */ |
| 86 | static int sha1_final(struct shash_desc *desc, u8 *out) | 76 | static int sha1_final(struct shash_desc *desc, u8 *out) |
| 87 | { | 77 | { |
| 88 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 78 | struct sha1_state *sctx = shash_desc_ctx(desc); |
| 89 | __be32 *dst = (__be32 *)out; | 79 | __be32 *dst = (__be32 *)out; |
| 90 | u32 i, index, padlen; | 80 | u32 i, index, padlen; |
| 91 | __be64 bits; | 81 | __be64 bits; |
| @@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out) | |||
| 111 | return 0; | 101 | return 0; |
| 112 | } | 102 | } |
| 113 | 103 | ||
| 104 | static int sha1_export(struct shash_desc *desc, void *out) | ||
| 105 | { | ||
| 106 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
| 107 | |||
| 108 | memcpy(out, sctx, sizeof(*sctx)); | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
| 113 | { | ||
| 114 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
| 115 | |||
| 116 | memcpy(sctx, in, sizeof(*sctx)); | ||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | |||
| 114 | static struct shash_alg alg = { | 120 | static struct shash_alg alg = { |
| 115 | .digestsize = SHA1_DIGEST_SIZE, | 121 | .digestsize = SHA1_DIGEST_SIZE, |
| 116 | .init = sha1_init, | 122 | .init = sha1_init, |
| 117 | .update = sha1_update, | 123 | .update = sha1_update, |
| 118 | .final = sha1_final, | 124 | .final = sha1_final, |
| 119 | .descsize = sizeof(struct sha1_ctx), | 125 | .export = sha1_export, |
| 126 | .import = sha1_import, | ||
| 127 | .descsize = sizeof(struct sha1_state), | ||
| 128 | .statesize = sizeof(struct sha1_state), | ||
| 120 | .base = { | 129 | .base = { |
| 121 | .cra_name = "sha1", | 130 | .cra_name = "sha1", |
| 122 | .cra_driver_name= "sha1-generic", | 131 | .cra_driver_name= "sha1-generic", |
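sha1_generic now keeps its running state in the shared struct sha1_state and gains export/import callbacks, which is what lets the hmac code above snapshot its pad states. A hedged sketch of checkpointing a partial SHA-1 computation with those hooks, with descriptor setup as in the earlier examples and error handling omitted for brevity:

#include <crypto/hash.h>
#include <crypto/sha.h>

static void sha1_checkpoint_example(struct shash_desc *desc,
				    const u8 *part1, unsigned int len1,
				    const u8 *part2, unsigned int len2,
				    u8 digest[SHA1_DIGEST_SIZE])
{
	struct sha1_state saved;	/* statesize bytes, from <crypto/sha.h> */

	crypto_shash_init(desc);
	crypto_shash_update(desc, part1, len1);
	crypto_shash_export(desc, &saved);	/* snapshot after part1 */

	/* the descriptor may be reused for other work here */

	crypto_shash_import(desc, &saved);	/* resume from the snapshot */
	crypto_shash_finup(desc, part2, len2, digest);
}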
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 6349d8339d37..c48459ebf05b 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c | |||
| @@ -25,12 +25,6 @@ | |||
| 25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
| 26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
| 27 | 27 | ||
| 28 | struct sha256_ctx { | ||
| 29 | u32 count[2]; | ||
| 30 | u32 state[8]; | ||
| 31 | u8 buf[128]; | ||
| 32 | }; | ||
| 33 | |||
| 34 | static inline u32 Ch(u32 x, u32 y, u32 z) | 28 | static inline u32 Ch(u32 x, u32 y, u32 z) |
| 35 | { | 29 | { |
| 36 | return z ^ (x & (y ^ z)); | 30 | return z ^ (x & (y ^ z)); |
| @@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input) | |||
| 222 | 216 | ||
| 223 | static int sha224_init(struct shash_desc *desc) | 217 | static int sha224_init(struct shash_desc *desc) |
| 224 | { | 218 | { |
| 225 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 219 | struct sha256_state *sctx = shash_desc_ctx(desc); |
| 226 | sctx->state[0] = SHA224_H0; | 220 | sctx->state[0] = SHA224_H0; |
| 227 | sctx->state[1] = SHA224_H1; | 221 | sctx->state[1] = SHA224_H1; |
| 228 | sctx->state[2] = SHA224_H2; | 222 | sctx->state[2] = SHA224_H2; |
| @@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc) | |||
| 231 | sctx->state[5] = SHA224_H5; | 225 | sctx->state[5] = SHA224_H5; |
| 232 | sctx->state[6] = SHA224_H6; | 226 | sctx->state[6] = SHA224_H6; |
| 233 | sctx->state[7] = SHA224_H7; | 227 | sctx->state[7] = SHA224_H7; |
| 234 | sctx->count[0] = 0; | 228 | sctx->count = 0; |
| 235 | sctx->count[1] = 0; | ||
| 236 | 229 | ||
| 237 | return 0; | 230 | return 0; |
| 238 | } | 231 | } |
| 239 | 232 | ||
| 240 | static int sha256_init(struct shash_desc *desc) | 233 | static int sha256_init(struct shash_desc *desc) |
| 241 | { | 234 | { |
| 242 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 235 | struct sha256_state *sctx = shash_desc_ctx(desc); |
| 243 | sctx->state[0] = SHA256_H0; | 236 | sctx->state[0] = SHA256_H0; |
| 244 | sctx->state[1] = SHA256_H1; | 237 | sctx->state[1] = SHA256_H1; |
| 245 | sctx->state[2] = SHA256_H2; | 238 | sctx->state[2] = SHA256_H2; |
| @@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc) | |||
| 248 | sctx->state[5] = SHA256_H5; | 241 | sctx->state[5] = SHA256_H5; |
| 249 | sctx->state[6] = SHA256_H6; | 242 | sctx->state[6] = SHA256_H6; |
| 250 | sctx->state[7] = SHA256_H7; | 243 | sctx->state[7] = SHA256_H7; |
| 251 | sctx->count[0] = sctx->count[1] = 0; | 244 | sctx->count = 0; |
| 252 | 245 | ||
| 253 | return 0; | 246 | return 0; |
| 254 | } | 247 | } |
| @@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc) | |||
| 256 | static int sha256_update(struct shash_desc *desc, const u8 *data, | 249 | static int sha256_update(struct shash_desc *desc, const u8 *data, |
| 257 | unsigned int len) | 250 | unsigned int len) |
| 258 | { | 251 | { |
| 259 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 252 | struct sha256_state *sctx = shash_desc_ctx(desc); |
| 260 | unsigned int i, index, part_len; | 253 | unsigned int partial, done; |
| 261 | 254 | const u8 *src; | |
| 262 | /* Compute number of bytes mod 128 */ | 255 | |
| 263 | index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); | 256 | partial = sctx->count & 0x3f; |
| 264 | 257 | sctx->count += len; | |
| 265 | /* Update number of bits */ | 258 | done = 0; |
| 266 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 259 | src = data; |
| 267 | sctx->count[1]++; | 260 | |
| 268 | sctx->count[1] += (len >> 29); | 261 | if ((partial + len) > 63) { |
| 269 | } | 262 | if (partial) { |
| 270 | 263 | done = -partial; | |
| 271 | part_len = 64 - index; | 264 | memcpy(sctx->buf + partial, data, done + 64); |
| 272 | 265 | src = sctx->buf; | |
| 273 | /* Transform as many times as possible. */ | 266 | } |
| 274 | if (len >= part_len) { | 267 | |
| 275 | memcpy(&sctx->buf[index], data, part_len); | 268 | do { |
| 276 | sha256_transform(sctx->state, sctx->buf); | 269 | sha256_transform(sctx->state, src); |
| 277 | 270 | done += 64; | |
| 278 | for (i = part_len; i + 63 < len; i += 64) | 271 | src = data + done; |
| 279 | sha256_transform(sctx->state, &data[i]); | 272 | } while (done + 63 < len); |
| 280 | index = 0; | 273 | |
| 281 | } else { | 274 | partial = 0; |
| 282 | i = 0; | ||
| 283 | } | 275 | } |
| 284 | 276 | memcpy(sctx->buf + partial, src, len - done); | |
| 285 | /* Buffer remaining input */ | ||
| 286 | memcpy(&sctx->buf[index], &data[i], len-i); | ||
| 287 | 277 | ||
| 288 | return 0; | 278 | return 0; |
| 289 | } | 279 | } |
| 290 | 280 | ||
| 291 | static int sha256_final(struct shash_desc *desc, u8 *out) | 281 | static int sha256_final(struct shash_desc *desc, u8 *out) |
| 292 | { | 282 | { |
| 293 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 283 | struct sha256_state *sctx = shash_desc_ctx(desc); |
| 294 | __be32 *dst = (__be32 *)out; | 284 | __be32 *dst = (__be32 *)out; |
| 295 | __be32 bits[2]; | 285 | __be64 bits; |
| 296 | unsigned int index, pad_len; | 286 | unsigned int index, pad_len; |
| 297 | int i; | 287 | int i; |
| 298 | static const u8 padding[64] = { 0x80, }; | 288 | static const u8 padding[64] = { 0x80, }; |
| 299 | 289 | ||
| 300 | /* Save number of bits */ | 290 | /* Save number of bits */ |
| 301 | bits[1] = cpu_to_be32(sctx->count[0]); | 291 | bits = cpu_to_be64(sctx->count << 3); |
| 302 | bits[0] = cpu_to_be32(sctx->count[1]); | ||
| 303 | 292 | ||
| 304 | /* Pad out to 56 mod 64. */ | 293 | /* Pad out to 56 mod 64. */ |
| 305 | index = (sctx->count[0] >> 3) & 0x3f; | 294 | index = sctx->count & 0x3f; |
| 306 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); | 295 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); |
| 307 | sha256_update(desc, padding, pad_len); | 296 | sha256_update(desc, padding, pad_len); |
| 308 | 297 | ||
| 309 | /* Append length (before padding) */ | 298 | /* Append length (before padding) */ |
| 310 | sha256_update(desc, (const u8 *)bits, sizeof(bits)); | 299 | sha256_update(desc, (const u8 *)&bits, sizeof(bits)); |
| 311 | 300 | ||
| 312 | /* Store state in digest */ | 301 | /* Store state in digest */ |
| 313 | for (i = 0; i < 8; i++) | 302 | for (i = 0; i < 8; i++) |
| @@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) | |||
| 331 | return 0; | 320 | return 0; |
| 332 | } | 321 | } |
| 333 | 322 | ||
| 323 | static int sha256_export(struct shash_desc *desc, void *out) | ||
| 324 | { | ||
| 325 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
| 326 | |||
| 327 | memcpy(out, sctx, sizeof(*sctx)); | ||
| 328 | return 0; | ||
| 329 | } | ||
| 330 | |||
| 331 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
| 332 | { | ||
| 333 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
| 334 | |||
| 335 | memcpy(sctx, in, sizeof(*sctx)); | ||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | |||
| 334 | static struct shash_alg sha256 = { | 339 | static struct shash_alg sha256 = { |
| 335 | .digestsize = SHA256_DIGEST_SIZE, | 340 | .digestsize = SHA256_DIGEST_SIZE, |
| 336 | .init = sha256_init, | 341 | .init = sha256_init, |
| 337 | .update = sha256_update, | 342 | .update = sha256_update, |
| 338 | .final = sha256_final, | 343 | .final = sha256_final, |
| 339 | .descsize = sizeof(struct sha256_ctx), | 344 | .export = sha256_export, |
| 345 | .import = sha256_import, | ||
| 346 | .descsize = sizeof(struct sha256_state), | ||
| 347 | .statesize = sizeof(struct sha256_state), | ||
| 340 | .base = { | 348 | .base = { |
| 341 | .cra_name = "sha256", | 349 | .cra_name = "sha256", |
| 342 | .cra_driver_name= "sha256-generic", | 350 | .cra_driver_name= "sha256-generic", |
| @@ -351,7 +359,7 @@ static struct shash_alg sha224 = { | |||
| 351 | .init = sha224_init, | 359 | .init = sha224_init, |
| 352 | .update = sha256_update, | 360 | .update = sha256_update, |
| 353 | .final = sha224_final, | 361 | .final = sha224_final, |
| 354 | .descsize = sizeof(struct sha256_ctx), | 362 | .descsize = sizeof(struct sha256_state), |
| 355 | .base = { | 363 | .base = { |
| 356 | .cra_name = "sha224", | 364 | .cra_name = "sha224", |
| 357 | .cra_driver_name= "sha224-generic", | 365 | .cra_driver_name= "sha224-generic", |
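The sha256_generic rework drops the hand-rolled pair of 32-bit bit counters in favour of the single 64-bit byte counter in struct sha256_state, so the final length trailer is just the byte count shifted into bits. A small hedged check of that arithmetic, assuming 100 bytes have been hashed:

#include <linux/types.h>
#include <asm/byteorder.h>

static void sha256_length_trailer_example(void)
{
	u64 count = 100;			/* bytes hashed so far */
	__be64 bits = cpu_to_be64(count << 3);	/* 800-bit length trailer */
	unsigned int index = count & 0x3f;	/* 36 bytes sit in the block */
	unsigned int pad_len = (index < 56) ? 56 - index : 120 - index;

	/* index + pad_len == 56, so the 8-byte trailer completes the block */
	(void)bits;
	(void)pad_len;
}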
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 3bea38d12242..9ed9f60316e5 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c | |||
| @@ -21,12 +21,6 @@ | |||
| 21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
| 22 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
| 23 | 23 | ||
| 24 | struct sha512_ctx { | ||
| 25 | u64 state[8]; | ||
| 26 | u32 count[4]; | ||
| 27 | u8 buf[128]; | ||
| 28 | }; | ||
| 29 | |||
| 30 | static DEFINE_PER_CPU(u64[80], msg_schedule); | 24 | static DEFINE_PER_CPU(u64[80], msg_schedule); |
| 31 | 25 | ||
| 32 | static inline u64 Ch(u64 x, u64 y, u64 z) | 26 | static inline u64 Ch(u64 x, u64 y, u64 z) |
| @@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input) | |||
| 141 | static int | 135 | static int |
| 142 | sha512_init(struct shash_desc *desc) | 136 | sha512_init(struct shash_desc *desc) |
| 143 | { | 137 | { |
| 144 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 138 | struct sha512_state *sctx = shash_desc_ctx(desc); |
| 145 | sctx->state[0] = SHA512_H0; | 139 | sctx->state[0] = SHA512_H0; |
| 146 | sctx->state[1] = SHA512_H1; | 140 | sctx->state[1] = SHA512_H1; |
| 147 | sctx->state[2] = SHA512_H2; | 141 | sctx->state[2] = SHA512_H2; |
| @@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc) | |||
| 150 | sctx->state[5] = SHA512_H5; | 144 | sctx->state[5] = SHA512_H5; |
| 151 | sctx->state[6] = SHA512_H6; | 145 | sctx->state[6] = SHA512_H6; |
| 152 | sctx->state[7] = SHA512_H7; | 146 | sctx->state[7] = SHA512_H7; |
| 153 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 147 | sctx->count[0] = sctx->count[1] = 0; |
| 154 | 148 | ||
| 155 | return 0; | 149 | return 0; |
| 156 | } | 150 | } |
| @@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc) | |||
| 158 | static int | 152 | static int |
| 159 | sha384_init(struct shash_desc *desc) | 153 | sha384_init(struct shash_desc *desc) |
| 160 | { | 154 | { |
| 161 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 155 | struct sha512_state *sctx = shash_desc_ctx(desc); |
| 162 | sctx->state[0] = SHA384_H0; | 156 | sctx->state[0] = SHA384_H0; |
| 163 | sctx->state[1] = SHA384_H1; | 157 | sctx->state[1] = SHA384_H1; |
| 164 | sctx->state[2] = SHA384_H2; | 158 | sctx->state[2] = SHA384_H2; |
| @@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc) | |||
| 167 | sctx->state[5] = SHA384_H5; | 161 | sctx->state[5] = SHA384_H5; |
| 168 | sctx->state[6] = SHA384_H6; | 162 | sctx->state[6] = SHA384_H6; |
| 169 | sctx->state[7] = SHA384_H7; | 163 | sctx->state[7] = SHA384_H7; |
| 170 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 164 | sctx->count[0] = sctx->count[1] = 0; |
| 171 | 165 | ||
| 172 | return 0; | 166 | return 0; |
| 173 | } | 167 | } |
| @@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc) | |||
| 175 | static int | 169 | static int |
| 176 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | 170 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) |
| 177 | { | 171 | { |
| 178 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 172 | struct sha512_state *sctx = shash_desc_ctx(desc); |
| 179 | 173 | ||
| 180 | unsigned int i, index, part_len; | 174 | unsigned int i, index, part_len; |
| 181 | 175 | ||
| 182 | /* Compute number of bytes mod 128 */ | 176 | /* Compute number of bytes mod 128 */ |
| 183 | index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); | 177 | index = sctx->count[0] & 0x7f; |
| 184 | 178 | ||
| 185 | /* Update number of bits */ | 179 | /* Update number of bytes */ |
| 186 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 180 | if (!(sctx->count[0] += len)) |
| 187 | if ((sctx->count[1] += 1) < 1) | 181 | sctx->count[1]++; |
| 188 | if ((sctx->count[2] += 1) < 1) | ||
| 189 | sctx->count[3]++; | ||
| 190 | sctx->count[1] += (len >> 29); | ||
| 191 | } | ||
| 192 | 182 | ||
| 193 | part_len = 128 - index; | 183 | part_len = 128 - index; |
| 194 | 184 | ||
| @@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
| 214 | static int | 204 | static int |
| 215 | sha512_final(struct shash_desc *desc, u8 *hash) | 205 | sha512_final(struct shash_desc *desc, u8 *hash) |
| 216 | { | 206 | { |
| 217 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 207 | struct sha512_state *sctx = shash_desc_ctx(desc); |
| 218 | static u8 padding[128] = { 0x80, }; | 208 | static u8 padding[128] = { 0x80, }; |
| 219 | __be64 *dst = (__be64 *)hash; | 209 | __be64 *dst = (__be64 *)hash; |
| 220 | __be32 bits[4]; | 210 | __be64 bits[2]; |
| 221 | unsigned int index, pad_len; | 211 | unsigned int index, pad_len; |
| 222 | int i; | 212 | int i; |
| 223 | 213 | ||
| 224 | /* Save number of bits */ | 214 | /* Save number of bits */ |
| 225 | bits[3] = cpu_to_be32(sctx->count[0]); | 215 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
| 226 | bits[2] = cpu_to_be32(sctx->count[1]); | 216 | bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
| 227 | bits[1] = cpu_to_be32(sctx->count[2]); | ||
| 228 | bits[0] = cpu_to_be32(sctx->count[3]); | ||
| 229 | 217 | ||
| 230 | /* Pad out to 112 mod 128. */ | 218 | /* Pad out to 112 mod 128. */ |
| 231 | index = (sctx->count[0] >> 3) & 0x7f; | 219 | index = sctx->count[0] & 0x7f; |
| 232 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); | 220 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); |
| 233 | sha512_update(desc, padding, pad_len); | 221 | sha512_update(desc, padding, pad_len); |
| 234 | 222 | ||
| @@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash) | |||
| 240 | dst[i] = cpu_to_be64(sctx->state[i]); | 228 | dst[i] = cpu_to_be64(sctx->state[i]); |
| 241 | 229 | ||
| 242 | /* Zeroize sensitive information. */ | 230 | /* Zeroize sensitive information. */ |
| 243 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 231 | memset(sctx, 0, sizeof(struct sha512_state)); |
| 244 | 232 | ||
| 245 | return 0; | 233 | return 0; |
| 246 | } | 234 | } |
| @@ -262,7 +250,7 @@ static struct shash_alg sha512 = { | |||
| 262 | .init = sha512_init, | 250 | .init = sha512_init, |
| 263 | .update = sha512_update, | 251 | .update = sha512_update, |
| 264 | .final = sha512_final, | 252 | .final = sha512_final, |
| 265 | .descsize = sizeof(struct sha512_ctx), | 253 | .descsize = sizeof(struct sha512_state), |
| 266 | .base = { | 254 | .base = { |
| 267 | .cra_name = "sha512", | 255 | .cra_name = "sha512", |
| 268 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 256 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
| @@ -276,7 +264,7 @@ static struct shash_alg sha384 = { | |||
| 276 | .init = sha384_init, | 264 | .init = sha384_init, |
| 277 | .update = sha512_update, | 265 | .update = sha512_update, |
| 278 | .final = sha384_final, | 266 | .final = sha384_final, |
| 279 | .descsize = sizeof(struct sha512_ctx), | 267 | .descsize = sizeof(struct sha512_state), |
| 280 | .base = { | 268 | .base = { |
| 281 | .cra_name = "sha384", | 269 | .cra_name = "sha384", |
| 282 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 270 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
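sha512_generic makes the same move with two 64-bit byte counters in struct sha512_state; the 128-bit SHA-512 length field is rebuilt by shifting the low counter into bits and carrying its top three bits into the high word, exactly as in sha512_final above. A hedged illustration of that reconstruction:

#include <linux/types.h>
#include <asm/byteorder.h>

static void sha512_length_trailer_example(const u64 count[2])
{
	__be64 bits[2];

	/* low 64 bits of the bit count */
	bits[1] = cpu_to_be64(count[0] << 3);
	/* high 64 bits: high byte counter times 8, plus the three bits
	 * shifted out of the low counter */
	bits[0] = cpu_to_be64(count[1] << 3 | count[0] >> 61);

	(void)bits;
}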
diff --git a/crypto/shash.c b/crypto/shash.c index 2ccc8b0076ce..91f7b9d83881 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
| @@ -22,6 +22,12 @@ | |||
| 22 | 22 | ||
| 23 | static const struct crypto_type crypto_shash_type; | 23 | static const struct crypto_type crypto_shash_type; |
| 24 | 24 | ||
| 25 | static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | ||
| 26 | unsigned int keylen) | ||
| 27 | { | ||
| 28 | return -ENOSYS; | ||
| 29 | } | ||
| 30 | |||
| 25 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | 31 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, |
| 26 | unsigned int keylen) | 32 | unsigned int keylen) |
| 27 | { | 33 | { |
| @@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
| 39 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 45 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
| 40 | memcpy(alignbuffer, key, keylen); | 46 | memcpy(alignbuffer, key, keylen); |
| 41 | err = shash->setkey(tfm, alignbuffer, keylen); | 47 | err = shash->setkey(tfm, alignbuffer, keylen); |
| 42 | memset(alignbuffer, 0, keylen); | 48 | kzfree(buffer); |
| 43 | kfree(buffer); | ||
| 44 | return err; | 49 | return err; |
| 45 | } | 50 | } |
| 46 | 51 | ||
| @@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
| 50 | struct shash_alg *shash = crypto_shash_alg(tfm); | 55 | struct shash_alg *shash = crypto_shash_alg(tfm); |
| 51 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 56 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
| 52 | 57 | ||
| 53 | if (!shash->setkey) | ||
| 54 | return -ENOSYS; | ||
| 55 | |||
| 56 | if ((unsigned long)key & alignmask) | 58 | if ((unsigned long)key & alignmask) |
| 57 | return shash_setkey_unaligned(tfm, key, keylen); | 59 | return shash_setkey_unaligned(tfm, key, keylen); |
| 58 | 60 | ||
| @@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, | |||
| 74 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 76 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
| 75 | unsigned int unaligned_len = alignmask + 1 - | 77 | unsigned int unaligned_len = alignmask + 1 - |
| 76 | ((unsigned long)data & alignmask); | 78 | ((unsigned long)data & alignmask); |
| 77 | u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] | 79 | u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] |
| 78 | __attribute__ ((aligned)); | 80 | __attribute__ ((aligned)); |
| 81 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
| 82 | int err; | ||
| 79 | 83 | ||
| 80 | if (unaligned_len > len) | 84 | if (unaligned_len > len) |
| 81 | unaligned_len = len; | 85 | unaligned_len = len; |
| 82 | 86 | ||
| 83 | memcpy(buf, data, unaligned_len); | 87 | memcpy(buf, data, unaligned_len); |
| 88 | err = shash->update(desc, buf, unaligned_len); | ||
| 89 | memset(buf, 0, unaligned_len); | ||
| 84 | 90 | ||
| 85 | return shash->update(desc, buf, unaligned_len) ?: | 91 | return err ?: |
| 86 | shash->update(desc, data + unaligned_len, len - unaligned_len); | 92 | shash->update(desc, data + unaligned_len, len - unaligned_len); |
| 87 | } | 93 | } |
| 88 | 94 | ||
| @@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) | |||
| 106 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 112 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
| 107 | struct shash_alg *shash = crypto_shash_alg(tfm); | 113 | struct shash_alg *shash = crypto_shash_alg(tfm); |
| 108 | unsigned int ds = crypto_shash_digestsize(tfm); | 114 | unsigned int ds = crypto_shash_digestsize(tfm); |
| 109 | u8 buf[shash_align_buffer_size(ds, alignmask)] | 115 | u8 ubuf[shash_align_buffer_size(ds, alignmask)] |
| 110 | __attribute__ ((aligned)); | 116 | __attribute__ ((aligned)); |
| 117 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
| 111 | int err; | 118 | int err; |
| 112 | 119 | ||
| 113 | err = shash->final(desc, buf); | 120 | err = shash->final(desc, buf); |
| 121 | if (err) | ||
| 122 | goto out; | ||
| 123 | |||
| 114 | memcpy(out, buf, ds); | 124 | memcpy(out, buf, ds); |
| 125 | |||
| 126 | out: | ||
| 127 | memset(buf, 0, ds); | ||
| 115 | return err; | 128 | return err; |
| 116 | } | 129 | } |
| 117 | 130 | ||
| @@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, | |||
| 142 | struct shash_alg *shash = crypto_shash_alg(tfm); | 155 | struct shash_alg *shash = crypto_shash_alg(tfm); |
| 143 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 156 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
| 144 | 157 | ||
| 145 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 158 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
| 146 | !shash->finup) | ||
| 147 | return shash_finup_unaligned(desc, data, len, out); | 159 | return shash_finup_unaligned(desc, data, len, out); |
| 148 | 160 | ||
| 149 | return shash->finup(desc, data, len, out); | 161 | return shash->finup(desc, data, len, out); |
| @@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, | |||
| 154 | unsigned int len, u8 *out) | 166 | unsigned int len, u8 *out) |
| 155 | { | 167 | { |
| 156 | return crypto_shash_init(desc) ?: | 168 | return crypto_shash_init(desc) ?: |
| 157 | crypto_shash_update(desc, data, len) ?: | 169 | crypto_shash_finup(desc, data, len, out); |
| 158 | crypto_shash_final(desc, out); | ||
| 159 | } | 170 | } |
| 160 | 171 | ||
| 161 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | 172 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, |
| @@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | |||
| 165 | struct shash_alg *shash = crypto_shash_alg(tfm); | 176 | struct shash_alg *shash = crypto_shash_alg(tfm); |
| 166 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 177 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
| 167 | 178 | ||
| 168 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 179 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
| 169 | !shash->digest) | ||
| 170 | return shash_digest_unaligned(desc, data, len, out); | 180 | return shash_digest_unaligned(desc, data, len, out); |
| 171 | 181 | ||
| 172 | return shash->digest(desc, data, len, out); | 182 | return shash->digest(desc, data, len, out); |
| 173 | } | 183 | } |
| 174 | EXPORT_SYMBOL_GPL(crypto_shash_digest); | 184 | EXPORT_SYMBOL_GPL(crypto_shash_digest); |
| 175 | 185 | ||
| 176 | int crypto_shash_import(struct shash_desc *desc, const u8 *in) | 186 | static int shash_default_export(struct shash_desc *desc, void *out) |
| 177 | { | 187 | { |
| 178 | struct crypto_shash *tfm = desc->tfm; | 188 | memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); |
| 179 | struct shash_alg *alg = crypto_shash_alg(tfm); | 189 | return 0; |
| 180 | 190 | } | |
| 181 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); | ||
| 182 | |||
| 183 | if (alg->reinit) | ||
| 184 | alg->reinit(desc); | ||
| 185 | 191 | ||
| 192 | static int shash_default_import(struct shash_desc *desc, const void *in) | ||
| 193 | { | ||
| 194 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); | ||
| 186 | return 0; | 195 | return 0; |
| 187 | } | 196 | } |
| 188 | EXPORT_SYMBOL_GPL(crypto_shash_import); | ||
| 189 | 197 | ||
| 190 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, | 198 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, |
| 191 | unsigned int keylen) | 199 | unsigned int keylen) |
| @@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req) | |||
| 206 | return crypto_shash_init(desc); | 214 | return crypto_shash_init(desc); |
| 207 | } | 215 | } |
| 208 | 216 | ||
| 209 | static int shash_async_update(struct ahash_request *req) | 217 | int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) |
| 210 | { | 218 | { |
| 211 | struct shash_desc *desc = ahash_request_ctx(req); | ||
| 212 | struct crypto_hash_walk walk; | 219 | struct crypto_hash_walk walk; |
| 213 | int nbytes; | 220 | int nbytes; |
| 214 | 221 | ||
| @@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req) | |||
| 218 | 225 | ||
| 219 | return nbytes; | 226 | return nbytes; |
| 220 | } | 227 | } |
| 228 | EXPORT_SYMBOL_GPL(shash_ahash_update); | ||
| 229 | |||
| 230 | static int shash_async_update(struct ahash_request *req) | ||
| 231 | { | ||
| 232 | return shash_ahash_update(req, ahash_request_ctx(req)); | ||
| 233 | } | ||
| 221 | 234 | ||
| 222 | static int shash_async_final(struct ahash_request *req) | 235 | static int shash_async_final(struct ahash_request *req) |
| 223 | { | 236 | { |
| 224 | return crypto_shash_final(ahash_request_ctx(req), req->result); | 237 | return crypto_shash_final(ahash_request_ctx(req), req->result); |
| 225 | } | 238 | } |
| 226 | 239 | ||
| 227 | static int shash_async_digest(struct ahash_request *req) | 240 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) |
| 241 | { | ||
| 242 | struct crypto_hash_walk walk; | ||
| 243 | int nbytes; | ||
| 244 | |||
| 245 | nbytes = crypto_hash_walk_first(req, &walk); | ||
| 246 | if (!nbytes) | ||
| 247 | return crypto_shash_final(desc, req->result); | ||
| 248 | |||
| 249 | do { | ||
| 250 | nbytes = crypto_hash_walk_last(&walk) ? | ||
| 251 | crypto_shash_finup(desc, walk.data, nbytes, | ||
| 252 | req->result) : | ||
| 253 | crypto_shash_update(desc, walk.data, nbytes); | ||
| 254 | nbytes = crypto_hash_walk_done(&walk, nbytes); | ||
| 255 | } while (nbytes > 0); | ||
| 256 | |||
| 257 | return nbytes; | ||
| 258 | } | ||
| 259 | EXPORT_SYMBOL_GPL(shash_ahash_finup); | ||
| 260 | |||
| 261 | static int shash_async_finup(struct ahash_request *req) | ||
| 262 | { | ||
| 263 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
| 264 | struct shash_desc *desc = ahash_request_ctx(req); | ||
| 265 | |||
| 266 | desc->tfm = *ctx; | ||
| 267 | desc->flags = req->base.flags; | ||
| 268 | |||
| 269 | return shash_ahash_finup(req, desc); | ||
| 270 | } | ||
| 271 | |||
| 272 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) | ||
| 228 | { | 273 | { |
| 229 | struct scatterlist *sg = req->src; | 274 | struct scatterlist *sg = req->src; |
| 230 | unsigned int offset = sg->offset; | 275 | unsigned int offset = sg->offset; |
| @@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req) | |||
| 232 | int err; | 277 | int err; |
| 233 | 278 | ||
| 234 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 279 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
| 235 | struct crypto_shash **ctx = | ||
| 236 | crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
| 237 | struct shash_desc *desc = ahash_request_ctx(req); | ||
| 238 | void *data; | 280 | void *data; |
| 239 | 281 | ||
| 240 | desc->tfm = *ctx; | ||
| 241 | desc->flags = req->base.flags; | ||
| 242 | |||
| 243 | data = crypto_kmap(sg_page(sg), 0); | 282 | data = crypto_kmap(sg_page(sg), 0); |
| 244 | err = crypto_shash_digest(desc, data + offset, nbytes, | 283 | err = crypto_shash_digest(desc, data + offset, nbytes, |
| 245 | req->result); | 284 | req->result); |
| 246 | crypto_kunmap(data, 0); | 285 | crypto_kunmap(data, 0); |
| 247 | crypto_yield(desc->flags); | 286 | crypto_yield(desc->flags); |
| 248 | goto out; | 287 | } else |
| 249 | } | 288 | err = crypto_shash_init(desc) ?: |
| 289 | shash_ahash_finup(req, desc); | ||
| 250 | 290 | ||
| 251 | err = shash_async_init(req); | 291 | return err; |
| 252 | if (err) | 292 | } |
| 253 | goto out; | 293 | EXPORT_SYMBOL_GPL(shash_ahash_digest); |
| 254 | 294 | ||
| 255 | err = shash_async_update(req); | 295 | static int shash_async_digest(struct ahash_request *req) |
| 256 | if (err) | 296 | { |
| 257 | goto out; | 297 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
| 298 | struct shash_desc *desc = ahash_request_ctx(req); | ||
| 258 | 299 | ||
| 259 | err = shash_async_final(req); | 300 | desc->tfm = *ctx; |
| 301 | desc->flags = req->base.flags; | ||
| 260 | 302 | ||
| 261 | out: | 303 | return shash_ahash_digest(req, desc); |
| 262 | return err; | 304 | } |
| 305 | |||
| 306 | static int shash_async_export(struct ahash_request *req, void *out) | ||
| 307 | { | ||
| 308 | return crypto_shash_export(ahash_request_ctx(req), out); | ||
| 309 | } | ||
| 310 | |||
| 311 | static int shash_async_import(struct ahash_request *req, const void *in) | ||
| 312 | { | ||
| 313 | return crypto_shash_import(ahash_request_ctx(req), in); | ||
| 263 | } | 314 | } |
| 264 | 315 | ||
| 265 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | 316 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) |
| @@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | |||
| 269 | crypto_free_shash(*ctx); | 320 | crypto_free_shash(*ctx); |
| 270 | } | 321 | } |
| 271 | 322 | ||
| 272 | static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | 323 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
| 273 | { | 324 | { |
| 274 | struct crypto_alg *calg = tfm->__crt_alg; | 325 | struct crypto_alg *calg = tfm->__crt_alg; |
| 275 | struct shash_alg *alg = __crypto_shash_alg(calg); | 326 | struct shash_alg *alg = __crypto_shash_alg(calg); |
| 276 | struct ahash_tfm *crt = &tfm->crt_ahash; | 327 | struct crypto_ahash *crt = __crypto_ahash_cast(tfm); |
| 277 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); | 328 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); |
| 278 | struct crypto_shash *shash; | 329 | struct crypto_shash *shash; |
| 279 | 330 | ||
| @@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
| 291 | 342 | ||
| 292 | crt->init = shash_async_init; | 343 | crt->init = shash_async_init; |
| 293 | crt->update = shash_async_update; | 344 | crt->update = shash_async_update; |
| 294 | crt->final = shash_async_final; | 345 | crt->final = shash_async_final; |
| 346 | crt->finup = shash_async_finup; | ||
| 295 | crt->digest = shash_async_digest; | 347 | crt->digest = shash_async_digest; |
| 296 | crt->setkey = shash_async_setkey; | ||
| 297 | 348 | ||
| 298 | crt->digestsize = alg->digestsize; | 349 | if (alg->setkey) |
| 350 | crt->setkey = shash_async_setkey; | ||
| 351 | if (alg->export) | ||
| 352 | crt->export = shash_async_export; | ||
| 353 | if (alg->import) | ||
| 354 | crt->import = shash_async_import; | ||
| 355 | |||
| 299 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); | 356 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); |
| 300 | 357 | ||
| 301 | return 0; | 358 | return 0; |
| @@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
| 304 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, | 361 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, |
| 305 | unsigned int keylen) | 362 | unsigned int keylen) |
| 306 | { | 363 | { |
| 307 | struct shash_desc *desc = crypto_hash_ctx(tfm); | 364 | struct shash_desc **descp = crypto_hash_ctx(tfm); |
| 365 | struct shash_desc *desc = *descp; | ||
| 308 | 366 | ||
| 309 | return crypto_shash_setkey(desc->tfm, key, keylen); | 367 | return crypto_shash_setkey(desc->tfm, key, keylen); |
| 310 | } | 368 | } |
| 311 | 369 | ||
| 312 | static int shash_compat_init(struct hash_desc *hdesc) | 370 | static int shash_compat_init(struct hash_desc *hdesc) |
| 313 | { | 371 | { |
| 314 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 372 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
| 373 | struct shash_desc *desc = *descp; | ||
| 315 | 374 | ||
| 316 | desc->flags = hdesc->flags; | 375 | desc->flags = hdesc->flags; |
| 317 | 376 | ||
| @@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc) | |||
| 321 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | 380 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, |
| 322 | unsigned int len) | 381 | unsigned int len) |
| 323 | { | 382 | { |
| 324 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 383 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
| 384 | struct shash_desc *desc = *descp; | ||
| 325 | struct crypto_hash_walk walk; | 385 | struct crypto_hash_walk walk; |
| 326 | int nbytes; | 386 | int nbytes; |
| 327 | 387 | ||
| @@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | |||
| 334 | 394 | ||
| 335 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) | 395 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) |
| 336 | { | 396 | { |
| 337 | return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); | 397 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
| 398 | |||
| 399 | return crypto_shash_final(*descp, out); | ||
| 338 | } | 400 | } |
| 339 | 401 | ||
| 340 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | 402 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, |
| @@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | |||
| 344 | int err; | 406 | int err; |
| 345 | 407 | ||
| 346 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 408 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
| 347 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 409 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
| 410 | struct shash_desc *desc = *descp; | ||
| 348 | void *data; | 411 | void *data; |
| 349 | 412 | ||
| 350 | desc->flags = hdesc->flags; | 413 | desc->flags = hdesc->flags; |
| @@ -372,9 +435,11 @@ out: | |||
| 372 | 435 | ||
| 373 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) | 436 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) |
| 374 | { | 437 | { |
| 375 | struct shash_desc *desc= crypto_tfm_ctx(tfm); | 438 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
| 439 | struct shash_desc *desc = *descp; | ||
| 376 | 440 | ||
| 377 | crypto_free_shash(desc->tfm); | 441 | crypto_free_shash(desc->tfm); |
| 442 | kzfree(desc); | ||
| 378 | } | 443 | } |
| 379 | 444 | ||
| 380 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | 445 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) |
| @@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
| 382 | struct hash_tfm *crt = &tfm->crt_hash; | 447 | struct hash_tfm *crt = &tfm->crt_hash; |
| 383 | struct crypto_alg *calg = tfm->__crt_alg; | 448 | struct crypto_alg *calg = tfm->__crt_alg; |
| 384 | struct shash_alg *alg = __crypto_shash_alg(calg); | 449 | struct shash_alg *alg = __crypto_shash_alg(calg); |
| 385 | struct shash_desc *desc = crypto_tfm_ctx(tfm); | 450 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
| 386 | struct crypto_shash *shash; | 451 | struct crypto_shash *shash; |
| 452 | struct shash_desc *desc; | ||
| 387 | 453 | ||
| 388 | if (!crypto_mod_get(calg)) | 454 | if (!crypto_mod_get(calg)) |
| 389 | return -EAGAIN; | 455 | return -EAGAIN; |
| @@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
| 394 | return PTR_ERR(shash); | 460 | return PTR_ERR(shash); |
| 395 | } | 461 | } |
| 396 | 462 | ||
| 463 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), | ||
| 464 | GFP_KERNEL); | ||
| 465 | if (!desc) { | ||
| 466 | crypto_free_shash(shash); | ||
| 467 | return -ENOMEM; | ||
| 468 | } | ||
| 469 | |||
| 470 | *descp = desc; | ||
| 397 | desc->tfm = shash; | 471 | desc->tfm = shash; |
| 398 | tfm->exit = crypto_exit_shash_ops_compat; | 472 | tfm->exit = crypto_exit_shash_ops_compat; |
| 399 | 473 | ||
| @@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
| 413 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 487 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
| 414 | case CRYPTO_ALG_TYPE_HASH_MASK: | 488 | case CRYPTO_ALG_TYPE_HASH_MASK: |
| 415 | return crypto_init_shash_ops_compat(tfm); | 489 | return crypto_init_shash_ops_compat(tfm); |
| 416 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
| 417 | return crypto_init_shash_ops_async(tfm); | ||
| 418 | } | 490 | } |
| 419 | 491 | ||
| 420 | return -EINVAL; | 492 | return -EINVAL; |
| @@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
| 423 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, | 495 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, |
| 424 | u32 mask) | 496 | u32 mask) |
| 425 | { | 497 | { |
| 426 | struct shash_alg *salg = __crypto_shash_alg(alg); | ||
| 427 | |||
| 428 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 498 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
| 429 | case CRYPTO_ALG_TYPE_HASH_MASK: | 499 | case CRYPTO_ALG_TYPE_HASH_MASK: |
| 430 | return sizeof(struct shash_desc) + salg->descsize; | 500 | return sizeof(struct shash_desc *); |
| 431 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
| 432 | return sizeof(struct crypto_shash *); | ||
| 433 | } | 501 | } |
| 434 | 502 | ||
| 435 | return 0; | 503 | return 0; |
| 436 | } | 504 | } |
| 437 | 505 | ||
| 438 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm, | 506 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
| 439 | const struct crypto_type *frontend) | ||
| 440 | { | 507 | { |
| 508 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | ||
| 509 | |||
| 510 | hash->descsize = crypto_shash_alg(hash)->descsize; | ||
| 441 | return 0; | 511 | return 0; |
| 442 | } | 512 | } |
| 443 | 513 | ||
| 444 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg, | 514 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg) |
| 445 | const struct crypto_type *frontend) | ||
| 446 | { | 515 | { |
| 447 | return alg->cra_ctxsize; | 516 | return alg->cra_ctxsize; |
| 448 | } | 517 | } |
| @@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) | |||
| 456 | seq_printf(m, "type : shash\n"); | 525 | seq_printf(m, "type : shash\n"); |
| 457 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 526 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
| 458 | seq_printf(m, "digestsize : %u\n", salg->digestsize); | 527 | seq_printf(m, "digestsize : %u\n", salg->digestsize); |
| 459 | seq_printf(m, "descsize : %u\n", salg->descsize); | ||
| 460 | } | 528 | } |
| 461 | 529 | ||
| 462 | static const struct crypto_type crypto_shash_type = { | 530 | static const struct crypto_type crypto_shash_type = { |
| @@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, | |||
| 480 | } | 548 | } |
| 481 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); | 549 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); |
| 482 | 550 | ||
| 483 | int crypto_register_shash(struct shash_alg *alg) | 551 | static int shash_prepare_alg(struct shash_alg *alg) |
| 484 | { | 552 | { |
| 485 | struct crypto_alg *base = &alg->base; | 553 | struct crypto_alg *base = &alg->base; |
| 486 | 554 | ||
| 487 | if (alg->digestsize > PAGE_SIZE / 8 || | 555 | if (alg->digestsize > PAGE_SIZE / 8 || |
| 488 | alg->descsize > PAGE_SIZE / 8) | 556 | alg->descsize > PAGE_SIZE / 8 || |
| 557 | alg->statesize > PAGE_SIZE / 8) | ||
| 489 | return -EINVAL; | 558 | return -EINVAL; |
| 490 | 559 | ||
| 491 | base->cra_type = &crypto_shash_type; | 560 | base->cra_type = &crypto_shash_type; |
| 492 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | 561 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; |
| 493 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; | 562 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; |
| 494 | 563 | ||
| 564 | if (!alg->finup) | ||
| 565 | alg->finup = shash_finup_unaligned; | ||
| 566 | if (!alg->digest) | ||
| 567 | alg->digest = shash_digest_unaligned; | ||
| 568 | if (!alg->export) { | ||
| 569 | alg->export = shash_default_export; | ||
| 570 | alg->import = shash_default_import; | ||
| 571 | alg->statesize = alg->descsize; | ||
| 572 | } | ||
| 573 | if (!alg->setkey) | ||
| 574 | alg->setkey = shash_no_setkey; | ||
| 575 | |||
| 576 | return 0; | ||
| 577 | } | ||
| 578 | |||
| 579 | int crypto_register_shash(struct shash_alg *alg) | ||
| 580 | { | ||
| 581 | struct crypto_alg *base = &alg->base; | ||
| 582 | int err; | ||
| 583 | |||
| 584 | err = shash_prepare_alg(alg); | ||
| 585 | if (err) | ||
| 586 | return err; | ||
| 587 | |||
| 495 | return crypto_register_alg(base); | 588 | return crypto_register_alg(base); |
| 496 | } | 589 | } |
| 497 | EXPORT_SYMBOL_GPL(crypto_register_shash); | 590 | EXPORT_SYMBOL_GPL(crypto_register_shash); |
| @@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg) | |||
| 502 | } | 595 | } |
| 503 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); | 596 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); |
| 504 | 597 | ||
| 598 | int shash_register_instance(struct crypto_template *tmpl, | ||
| 599 | struct shash_instance *inst) | ||
| 600 | { | ||
| 601 | int err; | ||
| 602 | |||
| 603 | err = shash_prepare_alg(&inst->alg); | ||
| 604 | if (err) | ||
| 605 | return err; | ||
| 606 | |||
| 607 | return crypto_register_instance(tmpl, shash_crypto_instance(inst)); | ||
| 608 | } | ||
| 609 | EXPORT_SYMBOL_GPL(shash_register_instance); | ||
| 610 | |||
| 611 | void shash_free_instance(struct crypto_instance *inst) | ||
| 612 | { | ||
| 613 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
| 614 | kfree(shash_instance(inst)); | ||
| 615 | } | ||
| 616 | EXPORT_SYMBOL_GPL(shash_free_instance); | ||
| 617 | |||
| 618 | int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, | ||
| 619 | struct shash_alg *alg, | ||
| 620 | struct crypto_instance *inst) | ||
| 621 | { | ||
| 622 | return crypto_init_spawn2(&spawn->base, &alg->base, inst, | ||
| 623 | &crypto_shash_type); | ||
| 624 | } | ||
| 625 | EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); | ||
| 626 | |||
| 627 | struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | ||
| 628 | { | ||
| 629 | struct crypto_alg *alg; | ||
| 630 | |||
| 631 | alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask); | ||
| 632 | return IS_ERR(alg) ? ERR_CAST(alg) : | ||
| 633 | container_of(alg, struct shash_alg, base); | ||
| 634 | } | ||
| 635 | EXPORT_SYMBOL_GPL(shash_attr_alg); | ||
| 636 | |||
| 505 | MODULE_LICENSE("GPL"); | 637 | MODULE_LICENSE("GPL"); |
| 506 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); | 638 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); |
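A note on the shash changes above: the unaligned setkey/final paths now derive an aligned pointer from an oversized stack buffer with PTR_ALIGN(&ubuf[0], alignmask + 1) and wipe the scratch data afterwards (kzfree/memset). The stand-alone sketch below is not part of the patch; it only illustrates the same alignment idiom in user space, and the macro definition, buffer sizes, and mask value are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

/* user-space stand-in for the kernel's ALIGN()/PTR_ALIGN() helpers */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)  ((void *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	unsigned long alignmask = 15;        /* e.g. an algorithm needing 16-byte alignment */
	unsigned char ubuf[64 + 15];         /* oversized so the aligned region still fits */
	unsigned char *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);

	printf("raw %p -> aligned %p\n", (void *)ubuf, (void *)buf);
	return ((uintptr_t)buf & alignmask) ? 1 : 0;   /* 0 when correctly aligned */
}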
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d59ba5079d14..aa3f84ccc786 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
| @@ -45,6 +45,9 @@ | |||
| 45 | */ | 45 | */ |
| 46 | static unsigned int sec; | 46 | static unsigned int sec; |
| 47 | 47 | ||
| 48 | static char *alg = NULL; | ||
| 49 | static u32 type; | ||
| 50 | static u32 mask; | ||
| 48 | static int mode; | 51 | static int mode; |
| 49 | static char *tvmem[TVMEMSIZE]; | 52 | static char *tvmem[TVMEMSIZE]; |
| 50 | 53 | ||
| @@ -716,6 +719,10 @@ static int do_test(int m) | |||
| 716 | ret += tcrypt_test("hmac(rmd160)"); | 719 | ret += tcrypt_test("hmac(rmd160)"); |
| 717 | break; | 720 | break; |
| 718 | 721 | ||
| 722 | case 109: | ||
| 723 | ret += tcrypt_test("vmac(aes)"); | ||
| 724 | break; | ||
| 725 | |||
| 719 | case 150: | 726 | case 150: |
| 720 | ret += tcrypt_test("ansi_cprng"); | 727 | ret += tcrypt_test("ansi_cprng"); |
| 721 | break; | 728 | break; |
| @@ -885,6 +892,12 @@ static int do_test(int m) | |||
| 885 | return ret; | 892 | return ret; |
| 886 | } | 893 | } |
| 887 | 894 | ||
| 895 | static int do_alg_test(const char *alg, u32 type, u32 mask) | ||
| 896 | { | ||
| 897 | return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ? | ||
| 898 | 0 : -ENOENT; | ||
| 899 | } | ||
| 900 | |||
| 888 | static int __init tcrypt_mod_init(void) | 901 | static int __init tcrypt_mod_init(void) |
| 889 | { | 902 | { |
| 890 | int err = -ENOMEM; | 903 | int err = -ENOMEM; |
| @@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void) | |||
| 896 | goto err_free_tv; | 909 | goto err_free_tv; |
| 897 | } | 910 | } |
| 898 | 911 | ||
| 899 | err = do_test(mode); | 912 | if (alg) |
| 913 | err = do_alg_test(alg, type, mask); | ||
| 914 | else | ||
| 915 | err = do_test(mode); | ||
| 916 | |||
| 900 | if (err) { | 917 | if (err) { |
| 901 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); | 918 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); |
| 902 | goto err_free_tv; | 919 | goto err_free_tv; |
| @@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { } | |||
| 928 | module_init(tcrypt_mod_init); | 945 | module_init(tcrypt_mod_init); |
| 929 | module_exit(tcrypt_mod_fini); | 946 | module_exit(tcrypt_mod_fini); |
| 930 | 947 | ||
| 948 | module_param(alg, charp, 0); | ||
| 949 | module_param(type, uint, 0); | ||
| 950 | module_param(mask, uint, 0); | ||
| 931 | module_param(mode, int, 0); | 951 | module_param(mode, int, 0); |
| 932 | module_param(sec, uint, 0); | 952 | module_param(sec, uint, 0); |
| 933 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " | 953 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " |
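With the new alg/type/mask parameters above, tcrypt can probe a single named algorithm instead of walking the numeric mode table; an invocation along the lines of modprobe tcrypt alg="vmac(aes)" (illustrative only, not taken from the patch) fails with -ENOENT if the algorithm cannot be instantiated, and instantiation normally triggers the testmgr self-test when the crypto manager is enabled.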
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index e9e9d84293b9..6d5b746637be 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
| 190 | 190 | ||
| 191 | hash_buff = xbuf[0]; | 191 | hash_buff = xbuf[0]; |
| 192 | 192 | ||
| 193 | ret = -EINVAL; | ||
| 194 | if (WARN_ON(template[i].psize > PAGE_SIZE)) | ||
| 195 | goto out; | ||
| 196 | |||
| 197 | memcpy(hash_buff, template[i].plaintext, template[i].psize); | 193 | memcpy(hash_buff, template[i].plaintext, template[i].psize); |
| 198 | sg_init_one(&sg[0], hash_buff, template[i].psize); | 194 | sg_init_one(&sg[0], hash_buff, template[i].psize); |
| 199 | 195 | ||
| @@ -2252,6 +2248,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 2252 | } | 2248 | } |
| 2253 | } | 2249 | } |
| 2254 | }, { | 2250 | }, { |
| 2251 | .alg = "vmac(aes)", | ||
| 2252 | .test = alg_test_hash, | ||
| 2253 | .suite = { | ||
| 2254 | .hash = { | ||
| 2255 | .vecs = aes_vmac128_tv_template, | ||
| 2256 | .count = VMAC_AES_TEST_VECTORS | ||
| 2257 | } | ||
| 2258 | } | ||
| 2259 | }, { | ||
| 2255 | .alg = "wp256", | 2260 | .alg = "wp256", |
| 2256 | .test = alg_test_hash, | 2261 | .test = alg_test_hash, |
| 2257 | .suite = { | 2262 | .suite = { |
| @@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg) | |||
| 2348 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | 2353 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) |
| 2349 | { | 2354 | { |
| 2350 | int i; | 2355 | int i; |
| 2356 | int j; | ||
| 2351 | int rc; | 2357 | int rc; |
| 2352 | 2358 | ||
| 2353 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { | 2359 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { |
| @@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
| 2369 | } | 2375 | } |
| 2370 | 2376 | ||
| 2371 | i = alg_find_test(alg); | 2377 | i = alg_find_test(alg); |
| 2372 | if (i < 0) | 2378 | j = alg_find_test(driver); |
| 2379 | if (i < 0 && j < 0) | ||
| 2373 | goto notest; | 2380 | goto notest; |
| 2374 | 2381 | ||
| 2375 | if (fips_enabled && !alg_test_descs[i].fips_allowed) | 2382 | if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || |
| 2383 | (j >= 0 && !alg_test_descs[j].fips_allowed))) | ||
| 2376 | goto non_fips_alg; | 2384 | goto non_fips_alg; |
| 2377 | 2385 | ||
| 2378 | rc = alg_test_descs[i].test(alg_test_descs + i, driver, | 2386 | rc = 0; |
| 2379 | type, mask); | 2387 | if (i >= 0) |
| 2388 | rc |= alg_test_descs[i].test(alg_test_descs + i, driver, | ||
| 2389 | type, mask); | ||
| 2390 | if (j >= 0) | ||
| 2391 | rc |= alg_test_descs[j].test(alg_test_descs + j, driver, | ||
| 2392 | type, mask); | ||
| 2393 | |||
| 2380 | test_done: | 2394 | test_done: |
| 2381 | if (fips_enabled && rc) | 2395 | if (fips_enabled && rc) |
| 2382 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); | 2396 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); |
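The alg_test() change above looks up test descriptors under both the generic algorithm name and the driver name, so a call such as alg_test("vmac(aes-generic)", "vmac(aes)", type, mask) (names chosen purely for illustration) runs whichever entries exist for either name and combines their results.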
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 69316228fc19..9963b18983ab 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { | |||
| 1654 | } | 1654 | } |
| 1655 | }; | 1655 | }; |
| 1656 | 1656 | ||
| 1657 | #define VMAC_AES_TEST_VECTORS 1 | ||
| 1658 | static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', | ||
| 1659 | '\x02', '\x03', '\x02', '\x02', | ||
| 1660 | '\x02', '\x04', '\x01', '\x07', | ||
| 1661 | '\x04', '\x01', '\x04', '\x03',}; | ||
| 1662 | static struct hash_testvec aes_vmac128_tv_template[] = { | ||
| 1663 | { | ||
| 1664 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 1665 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 1666 | .plaintext = vmac_string, | ||
| 1667 | .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", | ||
| 1668 | .psize = 128, | ||
| 1669 | .ksize = 16, | ||
| 1670 | }, | ||
| 1671 | }; | ||
| 1672 | |||
| 1657 | /* | 1673 | /* |
| 1658 | * SHA384 HMAC test vectors from RFC4231 | 1674 | * SHA384 HMAC test vectors from RFC4231 |
| 1659 | */ | 1675 | */ |
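For reference, the single vmac(aes) vector above can be reproduced through the shash interface extended earlier in this series. The sketch below is not part of the patch; it follows the descriptor allocation pattern used by the shash compat code (kmalloc of sizeof(*desc) + descsize), abbreviates error handling, and assumes the vector's key, plaintext, and tag as shown.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int vmac_vector_check(void)
{
	static const u8 key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	u8 digest[8];
	int err;

	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, (u8 *)vmac_string, 128, digest);
	if (!err && memcmp(digest, "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", 8))
		err = -EINVAL;		/* computed tag differs from the vector */

	kzfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}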
diff --git a/crypto/vmac.c b/crypto/vmac.c new file mode 100644 index 000000000000..0a9468e575de --- /dev/null +++ b/crypto/vmac.c | |||
| @@ -0,0 +1,678 @@ | |||
| 1 | /* | ||
| 2 | * Modified to interface to the Linux kernel | ||
| 3 | * Copyright (c) 2009, Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
| 16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
| 17 | */ | ||
| 18 | |||
| 19 | /* -------------------------------------------------------------------------- | ||
| 20 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. | ||
| 21 | * This implementation is hereby placed in the public domain. | ||
| 22 | * The authors offer no warranty. Use at your own risk. | ||
| 23 | * Please send bug reports to the authors. | ||
| 24 | * Last modified: 17 APR 08, 1700 PDT | ||
| 25 | * ----------------------------------------------------------------------- */ | ||
| 26 | |||
| 27 | #include <linux/init.h> | ||
| 28 | #include <linux/types.h> | ||
| 29 | #include <linux/crypto.h> | ||
| 30 | #include <linux/scatterlist.h> | ||
| 31 | #include <asm/byteorder.h> | ||
| 32 | #include <crypto/scatterwalk.h> | ||
| 33 | #include <crypto/vmac.h> | ||
| 34 | #include <crypto/internal/hash.h> | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Constants and masks | ||
| 38 | */ | ||
| 39 | #define UINT64_C(x) x##ULL | ||
| 40 | const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ | ||
| 41 | const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ | ||
| 42 | const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ | ||
| 43 | const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ | ||
| 44 | const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | ||
| 45 | |||
| 46 | #ifdef __LITTLE_ENDIAN | ||
| 47 | #define INDEX_HIGH 1 | ||
| 48 | #define INDEX_LOW 0 | ||
| 49 | #else | ||
| 50 | #define INDEX_HIGH 0 | ||
| 51 | #define INDEX_LOW 1 | ||
| 52 | #endif | ||
| 53 | |||
| 54 | /* | ||
| 55 | * The following routines are used in this implementation. They are | ||
| 56 | * written via macros to simulate zero-overhead call-by-reference. | ||
| 57 | * | ||
| 58 | * MUL64: 64x64->128-bit multiplication | ||
| 59 | * PMUL64: assumes top bits cleared on inputs | ||
| 60 | * ADD128: 128x128->128-bit addition | ||
| 61 | */ | ||
| 62 | |||
| 63 | #define ADD128(rh, rl, ih, il) \ | ||
| 64 | do { \ | ||
| 65 | u64 _il = (il); \ | ||
| 66 | (rl) += (_il); \ | ||
| 67 | if ((rl) < (_il)) \ | ||
| 68 | (rh)++; \ | ||
| 69 | (rh) += (ih); \ | ||
| 70 | } while (0) | ||
| 71 | |||
| 72 | #define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) | ||
| 73 | |||
| 74 | #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ | ||
| 75 | do { \ | ||
| 76 | u64 _i1 = (i1), _i2 = (i2); \ | ||
| 77 | u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ | ||
| 78 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
| 79 | rl = MUL32(_i1, _i2); \ | ||
| 80 | ADD128(rh, rl, (m >> 32), (m << 32)); \ | ||
| 81 | } while (0) | ||
| 82 | |||
| 83 | #define MUL64(rh, rl, i1, i2) \ | ||
| 84 | do { \ | ||
| 85 | u64 _i1 = (i1), _i2 = (i2); \ | ||
| 86 | u64 m1 = MUL32(_i1, _i2>>32); \ | ||
| 87 | u64 m2 = MUL32(_i1>>32, _i2); \ | ||
| 88 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
| 89 | rl = MUL32(_i1, _i2); \ | ||
| 90 | ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ | ||
| 91 | ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ | ||
| 92 | } while (0) | ||
| 93 | |||
| 94 | /* | ||
| 95 | * For highest performance the L1 NH and L2 polynomial hashes should be | ||
| 96 | * carefully implemented to take advantage of one's target architecture. | ||
| 97 | * Here these two hash functions are defined multiple times; once for | ||
| 98 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once | ||
| 99 | * for the rest (32-bit) architectures. | ||
| 100 | * For each, nh_16 *must* be defined (works on multiples of 16 bytes). | ||
| 101 | * Optionally, nh_vmac_nhbytes can be defined (for multiples of | ||
| 102 | * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two | ||
| 103 | * NH computations at once). | ||
| 104 | */ | ||
| 105 | |||
| 106 | #ifdef CONFIG_64BIT | ||
| 107 | |||
| 108 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
| 109 | do { \ | ||
| 110 | int i; u64 th, tl; \ | ||
| 111 | rh = rl = 0; \ | ||
| 112 | for (i = 0; i < nw; i += 2) { \ | ||
| 113 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
| 114 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
| 115 | ADD128(rh, rl, th, tl); \ | ||
| 116 | } \ | ||
| 117 | } while (0) | ||
| 118 | |||
| 119 | #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
| 120 | do { \ | ||
| 121 | int i; u64 th, tl; \ | ||
| 122 | rh1 = rl1 = rh = rl = 0; \ | ||
| 123 | for (i = 0; i < nw; i += 2) { \ | ||
| 124 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
| 125 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
| 126 | ADD128(rh, rl, th, tl); \ | ||
| 127 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
| 128 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
| 129 | ADD128(rh1, rl1, th, tl); \ | ||
| 130 | } \ | ||
| 131 | } while (0) | ||
| 132 | |||
| 133 | #if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ | ||
| 134 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
| 135 | do { \ | ||
| 136 | int i; u64 th, tl; \ | ||
| 137 | rh = rl = 0; \ | ||
| 138 | for (i = 0; i < nw; i += 8) { \ | ||
| 139 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
| 140 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
| 141 | ADD128(rh, rl, th, tl); \ | ||
| 142 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
| 143 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
| 144 | ADD128(rh, rl, th, tl); \ | ||
| 145 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
| 146 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
| 147 | ADD128(rh, rl, th, tl); \ | ||
| 148 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
| 149 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
| 150 | ADD128(rh, rl, th, tl); \ | ||
| 151 | } \ | ||
| 152 | } while (0) | ||
| 153 | |||
| 154 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
| 155 | do { \ | ||
| 156 | int i; u64 th, tl; \ | ||
| 157 | rh1 = rl1 = rh = rl = 0; \ | ||
| 158 | for (i = 0; i < nw; i += 8) { \ | ||
| 159 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
| 160 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
| 161 | ADD128(rh, rl, th, tl); \ | ||
| 162 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
| 163 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
| 164 | ADD128(rh1, rl1, th, tl); \ | ||
| 165 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
| 166 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
| 167 | ADD128(rh, rl, th, tl); \ | ||
| 168 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ | ||
| 169 | le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ | ||
| 170 | ADD128(rh1, rl1, th, tl); \ | ||
| 171 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
| 172 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
| 173 | ADD128(rh, rl, th, tl); \ | ||
| 174 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ | ||
| 175 | le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ | ||
| 176 | ADD128(rh1, rl1, th, tl); \ | ||
| 177 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
| 178 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
| 179 | ADD128(rh, rl, th, tl); \ | ||
| 180 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ | ||
| 181 | le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ | ||
| 182 | ADD128(rh1, rl1, th, tl); \ | ||
| 183 | } \ | ||
| 184 | } while (0) | ||
| 185 | #endif | ||
| 186 | |||
| 187 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
| 188 | do { \ | ||
| 189 | u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ | ||
| 190 | /* compute ab*cd, put bd into result registers */ \ | ||
| 191 | PMUL64(t3h, t3l, al, kh); \ | ||
| 192 | PMUL64(t2h, t2l, ah, kl); \ | ||
| 193 | PMUL64(t1h, t1l, ah, 2*kh); \ | ||
| 194 | PMUL64(ah, al, al, kl); \ | ||
| 195 | /* add 2 * ac to result */ \ | ||
| 196 | ADD128(ah, al, t1h, t1l); \ | ||
| 197 | /* add together ad + bc */ \ | ||
| 198 | ADD128(t2h, t2l, t3h, t3l); \ | ||
| 199 | /* now (ah,al), (t2l,2*t2h) need summing */ \ | ||
| 200 | /* first add the high registers, carrying into t2h */ \ | ||
| 201 | ADD128(t2h, ah, z, t2l); \ | ||
| 202 | /* double t2h and add top bit of ah */ \ | ||
| 203 | t2h = 2 * t2h + (ah >> 63); \ | ||
| 204 | ah &= m63; \ | ||
| 205 | /* now add the low registers */ \ | ||
| 206 | ADD128(ah, al, mh, ml); \ | ||
| 207 | ADD128(ah, al, z, t2h); \ | ||
| 208 | } while (0) | ||
| 209 | |||
| 210 | #else /* ! CONFIG_64BIT */ | ||
| 211 | |||
| 212 | #ifndef nh_16 | ||
| 213 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
| 214 | do { \ | ||
| 215 | u64 t1, t2, m1, m2, t; \ | ||
| 216 | int i; \ | ||
| 217 | rh = rl = t = 0; \ | ||
| 218 | for (i = 0; i < nw; i += 2) { \ | ||
| 219 | t1 = le64_to_cpup(mp+i) + kp[i]; \ | ||
| 220 | t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ | ||
| 221 | m2 = MUL32(t1 >> 32, t2); \ | ||
| 222 | m1 = MUL32(t1, t2 >> 32); \ | ||
| 223 | ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ | ||
| 224 | MUL32(t1, t2)); \ | ||
| 225 | rh += (u64)(u32)(m1 >> 32) \ | ||
| 226 | + (u32)(m2 >> 32); \ | ||
| 227 | t += (u64)(u32)m1 + (u32)m2; \ | ||
| 228 | } \ | ||
| 229 | ADD128(rh, rl, (t >> 32), (t << 32)); \ | ||
| 230 | } while (0) | ||
| 231 | #endif | ||
| 232 | |||
| 233 | static void poly_step_func(u64 *ahi, u64 *alo, | ||
| 234 | const u64 *kh, const u64 *kl, | ||
| 235 | const u64 *mh, const u64 *ml) | ||
| 236 | { | ||
| 237 | #define a0 (*(((u32 *)alo)+INDEX_LOW)) | ||
| 238 | #define a1 (*(((u32 *)alo)+INDEX_HIGH)) | ||
| 239 | #define a2 (*(((u32 *)ahi)+INDEX_LOW)) | ||
| 240 | #define a3 (*(((u32 *)ahi)+INDEX_HIGH)) | ||
| 241 | #define k0 (*(((u32 *)kl)+INDEX_LOW)) | ||
| 242 | #define k1 (*(((u32 *)kl)+INDEX_HIGH)) | ||
| 243 | #define k2 (*(((u32 *)kh)+INDEX_LOW)) | ||
| 244 | #define k3 (*(((u32 *)kh)+INDEX_HIGH)) | ||
| 245 | |||
| 246 | u64 p, q, t; | ||
| 247 | u32 t2; | ||
| 248 | |||
| 249 | p = MUL32(a3, k3); | ||
| 250 | p += p; | ||
| 251 | p += *(u64 *)mh; | ||
| 252 | p += MUL32(a0, k2); | ||
| 253 | p += MUL32(a1, k1); | ||
| 254 | p += MUL32(a2, k0); | ||
| 255 | t = (u32)(p); | ||
| 256 | p >>= 32; | ||
| 257 | p += MUL32(a0, k3); | ||
| 258 | p += MUL32(a1, k2); | ||
| 259 | p += MUL32(a2, k1); | ||
| 260 | p += MUL32(a3, k0); | ||
| 261 | t |= ((u64)((u32)p & 0x7fffffff)) << 32; | ||
| 262 | p >>= 31; | ||
| 263 | p += (u64)(((u32 *)ml)[INDEX_LOW]); | ||
| 264 | p += MUL32(a0, k0); | ||
| 265 | q = MUL32(a1, k3); | ||
| 266 | q += MUL32(a2, k2); | ||
| 267 | q += MUL32(a3, k1); | ||
| 268 | q += q; | ||
| 269 | p += q; | ||
| 270 | t2 = (u32)(p); | ||
| 271 | p >>= 32; | ||
| 272 | p += (u64)(((u32 *)ml)[INDEX_HIGH]); | ||
| 273 | p += MUL32(a0, k1); | ||
| 274 | p += MUL32(a1, k0); | ||
| 275 | q = MUL32(a2, k3); | ||
| 276 | q += MUL32(a3, k2); | ||
| 277 | q += q; | ||
| 278 | p += q; | ||
| 279 | *(u64 *)(alo) = (p << 32) | t2; | ||
| 280 | p >>= 32; | ||
| 281 | *(u64 *)(ahi) = p + t; | ||
| 282 | |||
| 283 | #undef a0 | ||
| 284 | #undef a1 | ||
| 285 | #undef a2 | ||
| 286 | #undef a3 | ||
| 287 | #undef k0 | ||
| 288 | #undef k1 | ||
| 289 | #undef k2 | ||
| 290 | #undef k3 | ||
| 291 | } | ||
| 292 | |||
| 293 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
| 294 | poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) | ||
| 295 | |||
| 296 | #endif /* end of specialized NH and poly definitions */ | ||
| 297 | |||
| 298 | /* At least nh_16 is defined. Define others as needed here */ | ||
| 299 | #ifndef nh_16_2 | ||
| 300 | #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
| 301 | do { \ | ||
| 302 | nh_16(mp, kp, nw, rh, rl); \ | ||
| 303 | nh_16(mp, ((kp)+2), nw, rh2, rl2); \ | ||
| 304 | } while (0) | ||
| 305 | #endif | ||
| 306 | #ifndef nh_vmac_nhbytes | ||
| 307 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
| 308 | nh_16(mp, kp, nw, rh, rl) | ||
| 309 | #endif | ||
| 310 | #ifndef nh_vmac_nhbytes_2 | ||
| 311 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
| 312 | do { \ | ||
| 313 | nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ | ||
| 314 | nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ | ||
| 315 | } while (0) | ||
| 316 | #endif | ||
| 317 | |||
| 318 | static void vhash_abort(struct vmac_ctx *ctx) | ||
| 319 | { | ||
| 320 | ctx->polytmp[0] = ctx->polykey[0] ; | ||
| 321 | ctx->polytmp[1] = ctx->polykey[1] ; | ||
| 322 | ctx->first_block_processed = 0; | ||
| 323 | } | ||
| 324 | |||
| 325 | static u64 l3hash(u64 p1, u64 p2, | ||
| 326 | u64 k1, u64 k2, u64 len) | ||
| 327 | { | ||
| 328 | u64 rh, rl, t, z = 0; | ||
| 329 | |||
| 330 | /* fully reduce (p1,p2)+(len,0) mod p127 */ | ||
| 331 | t = p1 >> 63; | ||
| 332 | p1 &= m63; | ||
| 333 | ADD128(p1, p2, len, t); | ||
| 334 | /* At this point, (p1,p2) is at most 2^127+(len<<64) */ | ||
| 335 | t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); | ||
| 336 | ADD128(p1, p2, z, t); | ||
| 337 | p1 &= m63; | ||
| 338 | |||
| 339 | /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ | ||
| 340 | t = p1 + (p2 >> 32); | ||
| 341 | t += (t >> 32); | ||
| 342 | t += (u32)t > 0xfffffffeu; | ||
| 343 | p1 += (t >> 32); | ||
| 344 | p2 += (p1 << 32); | ||
| 345 | |||
| 346 | /* compute (p1+k1)%p64 and (p2+k2)%p64 */ | ||
| 347 | p1 += k1; | ||
| 348 | p1 += (0 - (p1 < k1)) & 257; | ||
| 349 | p2 += k2; | ||
| 350 | p2 += (0 - (p2 < k2)) & 257; | ||
| 351 | |||
| 352 | /* compute (p1+k1)*(p2+k2)%p64 */ | ||
| 353 | MUL64(rh, rl, p1, p2); | ||
| 354 | t = rh >> 56; | ||
| 355 | ADD128(t, rl, z, rh); | ||
| 356 | rh <<= 8; | ||
| 357 | ADD128(t, rl, z, rh); | ||
| 358 | t += t << 8; | ||
| 359 | rl += t; | ||
| 360 | rl += (0 - (rl < t)) & 257; | ||
| 361 | rl += (0 - (rl > p64-1)) & 257; | ||
| 362 | return rl; | ||
| 363 | } | ||
| 364 | |||
| 365 | static void vhash_update(const unsigned char *m, | ||
| 366 | unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */ | ||
| 367 | struct vmac_ctx *ctx) | ||
| 368 | { | ||
| 369 | u64 rh, rl, *mptr; | ||
| 370 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
| 371 | int i; | ||
| 372 | u64 ch, cl; | ||
| 373 | u64 pkh = ctx->polykey[0]; | ||
| 374 | u64 pkl = ctx->polykey[1]; | ||
| 375 | |||
| 376 | mptr = (u64 *)m; | ||
| 377 | i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ | ||
| 378 | |||
| 379 | ch = ctx->polytmp[0]; | ||
| 380 | cl = ctx->polytmp[1]; | ||
| 381 | |||
| 382 | if (!ctx->first_block_processed) { | ||
| 383 | ctx->first_block_processed = 1; | ||
| 384 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
| 385 | rh &= m62; | ||
| 386 | ADD128(ch, cl, rh, rl); | ||
| 387 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
| 388 | i--; | ||
| 389 | } | ||
| 390 | |||
| 391 | while (i--) { | ||
| 392 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
| 393 | rh &= m62; | ||
| 394 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
| 395 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
| 396 | } | ||
| 397 | |||
| 398 | ctx->polytmp[0] = ch; | ||
| 399 | ctx->polytmp[1] = cl; | ||
| 400 | } | ||
| 401 | |||
| 402 | static u64 vhash(unsigned char m[], unsigned int mbytes, | ||
| 403 | u64 *tagl, struct vmac_ctx *ctx) | ||
| 404 | { | ||
| 405 | u64 rh, rl, *mptr; | ||
| 406 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
| 407 | int i, remaining; | ||
| 408 | u64 ch, cl; | ||
| 409 | u64 pkh = ctx->polykey[0]; | ||
| 410 | u64 pkl = ctx->polykey[1]; | ||
| 411 | |||
| 412 | mptr = (u64 *)m; | ||
| 413 | i = mbytes / VMAC_NHBYTES; | ||
| 414 | remaining = mbytes % VMAC_NHBYTES; | ||
| 415 | |||
| 416 | if (ctx->first_block_processed) { | ||
| 417 | ch = ctx->polytmp[0]; | ||
| 418 | cl = ctx->polytmp[1]; | ||
| 419 | } else if (i) { | ||
| 420 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); | ||
| 421 | ch &= m62; | ||
| 422 | ADD128(ch, cl, pkh, pkl); | ||
| 423 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
| 424 | i--; | ||
| 425 | } else if (remaining) { | ||
| 426 | nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); | ||
| 427 | ch &= m62; | ||
| 428 | ADD128(ch, cl, pkh, pkl); | ||
| 429 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
| 430 | goto do_l3; | ||
| 431 | } else {/* Empty String */ | ||
| 432 | ch = pkh; cl = pkl; | ||
| 433 | goto do_l3; | ||
| 434 | } | ||
| 435 | |||
| 436 | while (i--) { | ||
| 437 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
| 438 | rh &= m62; | ||
| 439 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
| 440 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
| 441 | } | ||
| 442 | if (remaining) { | ||
| 443 | nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); | ||
| 444 | rh &= m62; | ||
| 445 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
| 446 | } | ||
| 447 | |||
| 448 | do_l3: | ||
| 449 | vhash_abort(ctx); | ||
| 450 | remaining *= 8; | ||
| 451 | return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); | ||
| 452 | } | ||
| 453 | |||
| 454 | static u64 vmac(unsigned char m[], unsigned int mbytes, | ||
| 455 | unsigned char n[16], u64 *tagl, | ||
| 456 | struct vmac_ctx_t *ctx) | ||
| 457 | { | ||
| 458 | u64 *in_n, *out_p; | ||
| 459 | u64 p, h; | ||
| 460 | int i; | ||
| 461 | |||
| 462 | in_n = ctx->__vmac_ctx.cached_nonce; | ||
| 463 | out_p = ctx->__vmac_ctx.cached_aes; | ||
| 464 | |||
| 465 | i = n[15] & 1; | ||
| 466 | if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { | ||
| 467 | in_n[0] = *(u64 *)(n); | ||
| 468 | in_n[1] = *(u64 *)(n+8); | ||
| 469 | ((unsigned char *)in_n)[15] &= 0xFE; | ||
| 470 | crypto_cipher_encrypt_one(ctx->child, | ||
| 471 | (unsigned char *)out_p, (unsigned char *)in_n); | ||
| 472 | |||
| 473 | ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); | ||
| 474 | } | ||
| 475 | p = be64_to_cpup(out_p + i); | ||
| 476 | h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); | ||
| 477 | return p + h; | ||
| 478 | } | ||
| 479 | |||
| 480 | static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) | ||
| 481 | { | ||
| 482 | u64 in[2] = {0}, out[2]; | ||
| 483 | unsigned i; | ||
| 484 | int err = 0; | ||
| 485 | |||
| 486 | err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); | ||
| 487 | if (err) | ||
| 488 | return err; | ||
| 489 | |||
| 490 | /* Fill nh key */ | ||
| 491 | ((unsigned char *)in)[0] = 0x80; | ||
| 492 | for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { | ||
| 493 | crypto_cipher_encrypt_one(ctx->child, | ||
| 494 | (unsigned char *)out, (unsigned char *)in); | ||
| 495 | ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); | ||
| 496 | ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); | ||
| 497 | ((unsigned char *)in)[15] += 1; | ||
| 498 | } | ||
| 499 | |||
| 500 | /* Fill poly key */ | ||
| 501 | ((unsigned char *)in)[0] = 0xC0; | ||
| 502 | in[1] = 0; | ||
| 503 | for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { | ||
| 504 | crypto_cipher_encrypt_one(ctx->child, | ||
| 505 | (unsigned char *)out, (unsigned char *)in); | ||
| 506 | ctx->__vmac_ctx.polytmp[i] = | ||
| 507 | ctx->__vmac_ctx.polykey[i] = | ||
| 508 | be64_to_cpup(out) & mpoly; | ||
| 509 | ctx->__vmac_ctx.polytmp[i+1] = | ||
| 510 | ctx->__vmac_ctx.polykey[i+1] = | ||
| 511 | be64_to_cpup(out+1) & mpoly; | ||
| 512 | ((unsigned char *)in)[15] += 1; | ||
| 513 | } | ||
| 514 | |||
| 515 | /* Fill ip key */ | ||
| 516 | ((unsigned char *)in)[0] = 0xE0; | ||
| 517 | in[1] = 0; | ||
| 518 | for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { | ||
| 519 | do { | ||
| 520 | crypto_cipher_encrypt_one(ctx->child, | ||
| 521 | (unsigned char *)out, (unsigned char *)in); | ||
| 522 | ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); | ||
| 523 | ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); | ||
| 524 | ((unsigned char *)in)[15] += 1; | ||
| 525 | } while (ctx->__vmac_ctx.l3key[i] >= p64 | ||
| 526 | || ctx->__vmac_ctx.l3key[i+1] >= p64); | ||
| 527 | } | ||
| 528 | |||
| 529 | /* Invalidate nonce/aes cache and reset other elements */ | ||
| 530 | ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ | ||
| 531 | ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ | ||
| 532 | ctx->__vmac_ctx.first_block_processed = 0; | ||
| 533 | |||
| 534 | return err; | ||
| 535 | } | ||
| 536 | |||
| 537 | static int vmac_setkey(struct crypto_shash *parent, | ||
| 538 | const u8 *key, unsigned int keylen) | ||
| 539 | { | ||
| 540 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
| 541 | |||
| 542 | if (keylen != VMAC_KEY_LEN) { | ||
| 543 | crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 544 | return -EINVAL; | ||
| 545 | } | ||
| 546 | |||
| 547 | return vmac_set_key((u8 *)key, ctx); | ||
| 548 | } | ||
| 549 | |||
| 550 | static int vmac_init(struct shash_desc *pdesc) | ||
| 551 | { | ||
| 552 | struct crypto_shash *parent = pdesc->tfm; | ||
| 553 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
| 554 | |||
| 555 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 559 | static int vmac_update(struct shash_desc *pdesc, const u8 *p, | ||
| 560 | unsigned int len) | ||
| 561 | { | ||
| 562 | struct crypto_shash *parent = pdesc->tfm; | ||
| 563 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
| 564 | |||
| 565 | vhash_update(p, len, &ctx->__vmac_ctx); | ||
| 566 | |||
| 567 | return 0; | ||
| 568 | } | ||
| 569 | |||
| 570 | static int vmac_final(struct shash_desc *pdesc, u8 *out) | ||
| 571 | { | ||
| 572 | struct crypto_shash *parent = pdesc->tfm; | ||
| 573 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
| 574 | vmac_t mac; | ||
| 575 | u8 nonce[16] = {}; | ||
| 576 | |||
| 577 | mac = vmac(NULL, 0, nonce, NULL, ctx); | ||
| 578 | memcpy(out, &mac, sizeof(vmac_t)); | ||
| 579 | memset(&mac, 0, sizeof(vmac_t)); | ||
| 580 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
| 581 | return 0; | ||
| 582 | } | ||
| 583 | |||
| 584 | static int vmac_init_tfm(struct crypto_tfm *tfm) | ||
| 585 | { | ||
| 586 | struct crypto_cipher *cipher; | ||
| 587 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | ||
| 588 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | ||
| 589 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
| 590 | |||
| 591 | cipher = crypto_spawn_cipher(spawn); | ||
| 592 | if (IS_ERR(cipher)) | ||
| 593 | return PTR_ERR(cipher); | ||
| 594 | |||
| 595 | ctx->child = cipher; | ||
| 596 | return 0; | ||
| 597 | } | ||
| 598 | |||
| 599 | static void vmac_exit_tfm(struct crypto_tfm *tfm) | ||
| 600 | { | ||
| 601 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
| 602 | crypto_free_cipher(ctx->child); | ||
| 603 | } | ||
| 604 | |||
| 605 | static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
| 606 | { | ||
| 607 | struct shash_instance *inst; | ||
| 608 | struct crypto_alg *alg; | ||
| 609 | int err; | ||
| 610 | |||
| 611 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | ||
| 612 | if (err) | ||
| 613 | return err; | ||
| 614 | |||
| 615 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | ||
| 616 | CRYPTO_ALG_TYPE_MASK); | ||
| 617 | if (IS_ERR(alg)) | ||
| 618 | return PTR_ERR(alg); | ||
| 619 | |||
| 620 | inst = shash_alloc_instance("vmac", alg); | ||
| 621 | err = PTR_ERR(inst); | ||
| 622 | if (IS_ERR(inst)) | ||
| 623 | goto out_put_alg; | ||
| 624 | |||
| 625 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | ||
| 626 | shash_crypto_instance(inst), | ||
| 627 | CRYPTO_ALG_TYPE_MASK); | ||
| 628 | if (err) | ||
| 629 | goto out_free_inst; | ||
| 630 | |||
| 631 | inst->alg.base.cra_priority = alg->cra_priority; | ||
| 632 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | ||
| 633 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | ||
| 634 | |||
| 635 | inst->alg.digestsize = sizeof(vmac_t); | ||
| 636 | inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); | ||
| 637 | inst->alg.base.cra_init = vmac_init_tfm; | ||
| 638 | inst->alg.base.cra_exit = vmac_exit_tfm; | ||
| 639 | |||
| 640 | inst->alg.init = vmac_init; | ||
| 641 | inst->alg.update = vmac_update; | ||
| 642 | inst->alg.final = vmac_final; | ||
| 643 | inst->alg.setkey = vmac_setkey; | ||
| 644 | |||
| 645 | err = shash_register_instance(tmpl, inst); | ||
| 646 | if (err) { | ||
| 647 | out_free_inst: | ||
| 648 | shash_free_instance(shash_crypto_instance(inst)); | ||
| 649 | } | ||
| 650 | |||
| 651 | out_put_alg: | ||
| 652 | crypto_mod_put(alg); | ||
| 653 | return err; | ||
| 654 | } | ||
| 655 | |||
| 656 | static struct crypto_template vmac_tmpl = { | ||
| 657 | .name = "vmac", | ||
| 658 | .create = vmac_create, | ||
| 659 | .free = shash_free_instance, | ||
| 660 | .module = THIS_MODULE, | ||
| 661 | }; | ||
| 662 | |||
| 663 | static int __init vmac_module_init(void) | ||
| 664 | { | ||
| 665 | return crypto_register_template(&vmac_tmpl); | ||
| 666 | } | ||
| 667 | |||
| 668 | static void __exit vmac_module_exit(void) | ||
| 669 | { | ||
| 670 | crypto_unregister_template(&vmac_tmpl); | ||
| 671 | } | ||
| 672 | |||
| 673 | module_init(vmac_module_init); | ||
| 674 | module_exit(vmac_module_exit); | ||
| 675 | |||
| 676 | MODULE_LICENSE("GPL"); | ||
| 677 | MODULE_DESCRIPTION("VMAC hash algorithm"); | ||
| 678 | |||
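The MUL64()/ADD128() macros near the top of vmac.c build a 64x64 -> 128-bit product from four 32x32 multiplies plus carry propagation. The stand-alone check below is not part of the patch; it mirrors that decomposition in plain C and compares it against the compiler's __int128 on a 64-bit host, with arbitrary test values.

#include <stdint.h>
#include <stdio.h>

static void mul64(uint64_t *rh, uint64_t *rl, uint64_t i1, uint64_t i2)
{
	uint64_t m1 = (uint64_t)(uint32_t)i1 * (i2 >> 32);	/* low(i1) * high(i2) */
	uint64_t m2 = (i1 >> 32) * (uint64_t)(uint32_t)i2;	/* high(i1) * low(i2) */

	*rh = (i1 >> 32) * (i2 >> 32);				/* high * high */
	*rl = (uint64_t)(uint32_t)i1 * (uint32_t)i2;		/* low * low */

	/* fold both cross terms, carrying into the high word (cf. ADD128) */
	*rl += m1 << 32;
	*rh += (m1 >> 32) + (*rl < (m1 << 32));
	*rl += m2 << 32;
	*rh += (m2 >> 32) + (*rl < (m2 << 32));
}

int main(void)
{
	uint64_t a = 0xfffffffffffffeffULL;	/* p64 from above */
	uint64_t b = 0x0123456789abcdefULL;
	uint64_t rh, rl;
	unsigned __int128 ref = (unsigned __int128)a * b;

	mul64(&rh, &rl, a, b);
	printf("match: %d\n", rh == (uint64_t)(ref >> 64) && rl == (uint64_t)ref);
	return 0;
}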
diff --git a/crypto/xcbc.c b/crypto/xcbc.c index b63b633e549c..bb7b67fba349 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c | |||
| @@ -19,211 +19,142 @@ | |||
| 19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #include <crypto/scatterwalk.h> | 22 | #include <crypto/internal/hash.h> |
| 23 | #include <linux/crypto.h> | ||
| 24 | #include <linux/err.h> | 23 | #include <linux/err.h> |
| 25 | #include <linux/hardirq.h> | ||
| 26 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
| 27 | #include <linux/mm.h> | ||
| 28 | #include <linux/rtnetlink.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/scatterlist.h> | ||
| 31 | 25 | ||
| 32 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, | 26 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, |
| 33 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, | 27 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, |
| 34 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; | 28 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; |
| 29 | |||
| 35 | /* | 30 | /* |
| 36 | * +------------------------ | 31 | * +------------------------ |
| 37 | * | <parent tfm> | 32 | * | <parent tfm> |
| 38 | * +------------------------ | 33 | * +------------------------ |
| 39 | * | crypto_xcbc_ctx | 34 | * | xcbc_tfm_ctx |
| 40 | * +------------------------ | 35 | * +------------------------ |
| 41 | * | odds (block size) | 36 | * | consts (block size * 2) |
| 42 | * +------------------------ | 37 | * +------------------------ |
| 43 | * | prev (block size) | 38 | */ |
| 39 | struct xcbc_tfm_ctx { | ||
| 40 | struct crypto_cipher *child; | ||
| 41 | u8 ctx[]; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /* | ||
| 44 | * +------------------------ | 45 | * +------------------------ |
| 45 | * | key (block size) | 46 | * | <shash desc> |
| 46 | * +------------------------ | 47 | * +------------------------ |
| 47 | * | consts (block size * 3) | 48 | * | xcbc_desc_ctx |
| 49 | * +------------------------ | ||
| 50 | * | odds (block size) | ||
| 51 | * +------------------------ | ||
| 52 | * | prev (block size) | ||
| 48 | * +------------------------ | 53 | * +------------------------ |
| 49 | */ | 54 | */ |
| 50 | struct crypto_xcbc_ctx { | 55 | struct xcbc_desc_ctx { |
| 51 | struct crypto_cipher *child; | ||
| 52 | u8 *odds; | ||
| 53 | u8 *prev; | ||
| 54 | u8 *key; | ||
| 55 | u8 *consts; | ||
| 56 | void (*xor)(u8 *a, const u8 *b, unsigned int bs); | ||
| 57 | unsigned int keylen; | ||
| 58 | unsigned int len; | 56 | unsigned int len; |
| 57 | u8 ctx[]; | ||
| 59 | }; | 58 | }; |
| 60 | 59 | ||
| 61 | static void xor_128(u8 *a, const u8 *b, unsigned int bs) | 60 | static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, |
| 62 | { | 61 | const u8 *inkey, unsigned int keylen) |
| 63 | ((u32 *)a)[0] ^= ((u32 *)b)[0]; | ||
| 64 | ((u32 *)a)[1] ^= ((u32 *)b)[1]; | ||
| 65 | ((u32 *)a)[2] ^= ((u32 *)b)[2]; | ||
| 66 | ((u32 *)a)[3] ^= ((u32 *)b)[3]; | ||
| 67 | } | ||
| 68 | |||
| 69 | static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
| 70 | struct crypto_xcbc_ctx *ctx) | ||
| 71 | { | 62 | { |
| 72 | int bs = crypto_hash_blocksize(parent); | 63 | unsigned long alignmask = crypto_shash_alignmask(parent); |
| 64 | struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); | ||
| 65 | int bs = crypto_shash_blocksize(parent); | ||
| 66 | u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
| 73 | int err = 0; | 67 | int err = 0; |
| 74 | u8 key1[bs]; | 68 | u8 key1[bs]; |
| 75 | 69 | ||
| 76 | if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) | 70 | if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) |
| 77 | return err; | 71 | return err; |
| 78 | 72 | ||
| 79 | crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); | 73 | crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); |
| 74 | crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); | ||
| 75 | crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); | ||
| 80 | 76 | ||
| 81 | return crypto_cipher_setkey(ctx->child, key1, bs); | 77 | return crypto_cipher_setkey(ctx->child, key1, bs); |
| 82 | } | ||
| 83 | |||
| 84 | static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
| 85 | const u8 *inkey, unsigned int keylen) | ||
| 86 | { | ||
| 87 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
| 88 | |||
| 89 | if (keylen != crypto_cipher_blocksize(ctx->child)) | ||
| 90 | return -EINVAL; | ||
| 91 | 78 | ||
| 92 | ctx->keylen = keylen; | ||
| 93 | memcpy(ctx->key, inkey, keylen); | ||
| 94 | ctx->consts = (u8*)ks; | ||
| 95 | |||
| 96 | return _crypto_xcbc_digest_setkey(parent, ctx); | ||
| 97 | } | 79 | } |
| 98 | 80 | ||
| 99 | static int crypto_xcbc_digest_init(struct hash_desc *pdesc) | 81 | static int crypto_xcbc_digest_init(struct shash_desc *pdesc) |
| 100 | { | 82 | { |
| 101 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm); | 83 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); |
| 102 | int bs = crypto_hash_blocksize(pdesc->tfm); | 84 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
| 85 | int bs = crypto_shash_blocksize(pdesc->tfm); | ||
| 86 | u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; | ||
| 103 | 87 | ||
| 104 | ctx->len = 0; | 88 | ctx->len = 0; |
| 105 | memset(ctx->odds, 0, bs); | 89 | memset(prev, 0, bs); |
| 106 | memset(ctx->prev, 0, bs); | ||
| 107 | 90 | ||
| 108 | return 0; | 91 | return 0; |
| 109 | } | 92 | } |
| 110 | 93 | ||
| 111 | static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, | 94 | static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, |
| 112 | struct scatterlist *sg, | 95 | unsigned int len) |
| 113 | unsigned int nbytes) | ||
| 114 | { | 96 | { |
| 115 | struct crypto_hash *parent = pdesc->tfm; | 97 | struct crypto_shash *parent = pdesc->tfm; |
| 116 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | 98 | unsigned long alignmask = crypto_shash_alignmask(parent); |
| 117 | struct crypto_cipher *tfm = ctx->child; | 99 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); |
| 118 | int bs = crypto_hash_blocksize(parent); | 100 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
| 119 | 101 | struct crypto_cipher *tfm = tctx->child; | |
| 120 | for (;;) { | 102 | int bs = crypto_shash_blocksize(parent); |
| 121 | struct page *pg = sg_page(sg); | 103 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
| 122 | unsigned int offset = sg->offset; | 104 | u8 *prev = odds + bs; |
| 123 | unsigned int slen = sg->length; | 105 | |
| 124 | 106 | /* checking the data can fill the block */ | |
| 125 | if (unlikely(slen > nbytes)) | 107 | if ((ctx->len + len) <= bs) { |
| 126 | slen = nbytes; | 108 | memcpy(odds + ctx->len, p, len); |
| 127 | 109 | ctx->len += len; | |
| 128 | nbytes -= slen; | 110 | return 0; |
| 129 | |||
| 130 | while (slen > 0) { | ||
| 131 | unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
| 132 | char *p = crypto_kmap(pg, 0) + offset; | ||
| 133 | |||
| 134 | /* checking the data can fill the block */ | ||
| 135 | if ((ctx->len + len) <= bs) { | ||
| 136 | memcpy(ctx->odds + ctx->len, p, len); | ||
| 137 | ctx->len += len; | ||
| 138 | slen -= len; | ||
| 139 | |||
| 140 | /* checking the rest of the page */ | ||
| 141 | if (len + offset >= PAGE_SIZE) { | ||
| 142 | offset = 0; | ||
| 143 | pg++; | ||
| 144 | } else | ||
| 145 | offset += len; | ||
| 146 | |||
| 147 | crypto_kunmap(p, 0); | ||
| 148 | crypto_yield(pdesc->flags); | ||
| 149 | continue; | ||
| 150 | } | ||
| 151 | |||
| 152 | /* filling odds with new data and encrypting it */ | ||
| 153 | memcpy(ctx->odds + ctx->len, p, bs - ctx->len); | ||
| 154 | len -= bs - ctx->len; | ||
| 155 | p += bs - ctx->len; | ||
| 156 | |||
| 157 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
| 158 | crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); | ||
| 159 | |||
| 160 | /* clearing the length */ | ||
| 161 | ctx->len = 0; | ||
| 162 | |||
| 163 | /* encrypting the rest of data */ | ||
| 164 | while (len > bs) { | ||
| 165 | ctx->xor(ctx->prev, p, bs); | ||
| 166 | crypto_cipher_encrypt_one(tfm, ctx->prev, | ||
| 167 | ctx->prev); | ||
| 168 | p += bs; | ||
| 169 | len -= bs; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* keeping the surplus of blocksize */ | ||
| 173 | if (len) { | ||
| 174 | memcpy(ctx->odds, p, len); | ||
| 175 | ctx->len = len; | ||
| 176 | } | ||
| 177 | crypto_kunmap(p, 0); | ||
| 178 | crypto_yield(pdesc->flags); | ||
| 179 | slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
| 180 | offset = 0; | ||
| 181 | pg++; | ||
| 182 | } | ||
| 183 | |||
| 184 | if (!nbytes) | ||
| 185 | break; | ||
| 186 | sg = scatterwalk_sg_next(sg); | ||
| 187 | } | 111 | } |
| 188 | 112 | ||
| 189 | return 0; | 113 | /* filling odds with new data and encrypting it */ |
| 190 | } | 114 | memcpy(odds + ctx->len, p, bs - ctx->len); |
| 115 | len -= bs - ctx->len; | ||
| 116 | p += bs - ctx->len; | ||
| 191 | 117 | ||
| 192 | static int crypto_xcbc_digest_update(struct hash_desc *pdesc, | 118 | crypto_xor(prev, odds, bs); |
| 193 | struct scatterlist *sg, | 119 | crypto_cipher_encrypt_one(tfm, prev, prev); |
| 194 | unsigned int nbytes) | ||
| 195 | { | ||
| 196 | if (WARN_ON_ONCE(in_irq())) | ||
| 197 | return -EDEADLK; | ||
| 198 | return crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
| 199 | } | ||
| 200 | 120 | ||
| 201 | static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | 121 | /* clearing the length */ |
| 202 | { | 122 | ctx->len = 0; |
| 203 | struct crypto_hash *parent = pdesc->tfm; | ||
| 204 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
| 205 | struct crypto_cipher *tfm = ctx->child; | ||
| 206 | int bs = crypto_hash_blocksize(parent); | ||
| 207 | int err = 0; | ||
| 208 | |||
| 209 | if (ctx->len == bs) { | ||
| 210 | u8 key2[bs]; | ||
| 211 | 123 | ||
| 212 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 124 | /* encrypting the rest of data */ |
| 213 | return err; | 125 | while (len > bs) { |
| 126 | crypto_xor(prev, p, bs); | ||
| 127 | crypto_cipher_encrypt_one(tfm, prev, prev); | ||
| 128 | p += bs; | ||
| 129 | len -= bs; | ||
| 130 | } | ||
| 214 | 131 | ||
| 215 | crypto_cipher_encrypt_one(tfm, key2, | 132 | /* keeping the surplus of blocksize */ |
| 216 | (u8 *)(ctx->consts + bs)); | 133 | if (len) { |
| 134 | memcpy(odds, p, len); | ||
| 135 | ctx->len = len; | ||
| 136 | } | ||
| 217 | 137 | ||
| 218 | ctx->xor(ctx->prev, ctx->odds, bs); | 138 | return 0; |
| 219 | ctx->xor(ctx->prev, key2, bs); | 139 | } |
| 220 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
| 221 | 140 | ||
| 222 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | 141 | static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) |
| 223 | } else { | 142 | { |
| 224 | u8 key3[bs]; | 143 | struct crypto_shash *parent = pdesc->tfm; |
| 144 | unsigned long alignmask = crypto_shash_alignmask(parent); | ||
| 145 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); | ||
| 146 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | ||
| 147 | struct crypto_cipher *tfm = tctx->child; | ||
| 148 | int bs = crypto_shash_blocksize(parent); | ||
| 149 | u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); | ||
| 150 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
| 151 | u8 *prev = odds + bs; | ||
| 152 | unsigned int offset = 0; | ||
| 153 | |||
| 154 | if (ctx->len != bs) { | ||
| 225 | unsigned int rlen; | 155 | unsigned int rlen; |
| 226 | u8 *p = ctx->odds + ctx->len; | 156 | u8 *p = odds + ctx->len; |
| 157 | |||
| 227 | *p = 0x80; | 158 | *p = 0x80; |
| 228 | p++; | 159 | p++; |
| 229 | 160 | ||
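The hunk above drops the scatterlist walker in favour of a plain shash update and moves the RFC 3566 key schedule into crypto_xcbc_digest_setkey(): the three constant blocks in ks[] are encrypted under the user key, the results for the 0x02/0x03 constants are cached as "consts" in the tfm context, and the encryption of the 0x01 constant (K1) re-keys the child cipher used for the CBC-MAC chaining. The standalone sketch below shows that derivation order only; the toy_encrypt() stand-in and the demo key are made up for illustration and are not AES.

/*
 * Standalone sketch of the XCBC (RFC 3566) key derivation performed in
 * crypto_xcbc_digest_setkey() above.  A trivial stand-in "cipher"
 * replaces AES so the example compiles and runs by itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16	/* AES block size, the only size xcbc_create() accepts */

/* Toy 16-byte "block cipher": XOR with the key.  NOT cryptography. */
static void toy_encrypt(const uint8_t key[BS], const uint8_t in[BS],
			uint8_t out[BS])
{
	for (int i = 0; i < BS; i++)
		out[i] = in[i] ^ key[i];
}

int main(void)
{
	/* Same constant blocks as the ks[] table in xcbc.c. */
	uint8_t c1[BS], c2[BS], c3[BS];
	memset(c1, 0x01, BS);
	memset(c2, 0x02, BS);
	memset(c3, 0x03, BS);

	uint8_t user_key[BS] = { 0xde, 0xad, 0xbe, 0xef };	/* demo key */
	uint8_t k1[BS], k2[BS], k3[BS];

	/* K1/K2/K3 = E(user_key, 0x01.. / 0x02.. / 0x03..). */
	toy_encrypt(user_key, c1, k1);	/* re-keys the chaining cipher   */
	toy_encrypt(user_key, c2, k2);	/* cached as consts[0..bs)       */
	toy_encrypt(user_key, c3, k3);	/* cached as consts[bs..2*bs)    */

	printf("K1[0]=%02x K2[0]=%02x K3[0]=%02x\n", k1[0], k2[0], k3[0]);
	return 0;
}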
| @@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | |||
| 231 | if (rlen) | 162 | if (rlen) |
| 232 | memset(p, 0, rlen); | 163 | memset(p, 0, rlen); |
| 233 | 164 | ||
| 234 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 165 | offset += bs; |
| 235 | return err; | ||
| 236 | |||
| 237 | crypto_cipher_encrypt_one(tfm, key3, | ||
| 238 | (u8 *)(ctx->consts + bs * 2)); | ||
| 239 | |||
| 240 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
| 241 | ctx->xor(ctx->prev, key3, bs); | ||
| 242 | |||
| 243 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
| 244 | |||
| 245 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | ||
| 246 | } | 166 | } |
| 247 | 167 | ||
| 248 | return 0; | 168 | crypto_xor(prev, odds, bs); |
| 249 | } | 169 | crypto_xor(prev, consts + offset, bs); |
| 250 | 170 | ||
| 251 | static int crypto_xcbc_digest(struct hash_desc *pdesc, | 171 | crypto_cipher_encrypt_one(tfm, out, prev); |
| 252 | struct scatterlist *sg, unsigned int nbytes, u8 *out) | ||
| 253 | { | ||
| 254 | if (WARN_ON_ONCE(in_irq())) | ||
| 255 | return -EDEADLK; | ||
| 256 | 172 | ||
| 257 | crypto_xcbc_digest_init(pdesc); | 173 | return 0; |
| 258 | crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
| 259 | return crypto_xcbc_digest_final(pdesc, out); | ||
| 260 | } | 174 | } |
| 261 | 175 | ||
| 262 | static int xcbc_init_tfm(struct crypto_tfm *tfm) | 176 | static int xcbc_init_tfm(struct crypto_tfm *tfm) |
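The finalization hunk above no longer re-derives the pad keys at digest time: a partial last block is padded with 0x80 followed by zeroes, the buffered block and the appropriate cached constant are XORed into the running chaining value (K2 at offset 0 for a complete final block, K3 at offset bs for a padded one), and one more encryption under K1 produces the MAC. The standalone sketch below mirrors that padding and key selection; the toy_encrypt() stand-in and the buffer names are hypothetical.

/*
 * Standalone sketch of the finalization path above: 0x80 padding of a
 * partial last block, XOR with the cached K2/K3 constant, and one last
 * "encryption" under K1, as in RFC 3566.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16

static void toy_encrypt(const uint8_t key[BS], const uint8_t in[BS],
			uint8_t out[BS])
{
	for (int i = 0; i < BS; i++)
		out[i] = in[i] ^ key[i];
}

static void xcbc_final_sketch(uint8_t odds[BS], unsigned int len,
			      uint8_t prev[BS], const uint8_t k1[BS],
			      const uint8_t k2[BS], const uint8_t k3[BS],
			      uint8_t mac[BS])
{
	const uint8_t *pad_key = k2;	/* complete final block: use K2 */

	if (len != BS) {		/* partial block: 0x80, zeroes, use K3 */
		odds[len] = 0x80;
		memset(odds + len + 1, 0, BS - len - 1);
		pad_key = k3;
	}

	for (int i = 0; i < BS; i++)	/* prev ^= odds ^ pad_key */
		prev[i] ^= odds[i] ^ pad_key[i];

	toy_encrypt(k1, prev, mac);	/* MAC = E_K1(prev) */
}

int main(void)
{
	uint8_t odds[BS] = "last bytes", prev[BS] = { 0 }, mac[BS];
	uint8_t k1[BS] = { 1 }, k2[BS] = { 2 }, k3[BS] = { 3 };

	xcbc_final_sketch(odds, 10, prev, k1, k2, k3, mac);
	printf("mac[0]=%02x\n", mac[0]);
	return 0;
}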
| @@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) | |||
| 264 | struct crypto_cipher *cipher; | 178 | struct crypto_cipher *cipher; |
| 265 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 179 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
| 266 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 180 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
| 267 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 181 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
| 268 | int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm)); | ||
| 269 | 182 | ||
| 270 | cipher = crypto_spawn_cipher(spawn); | 183 | cipher = crypto_spawn_cipher(spawn); |
| 271 | if (IS_ERR(cipher)) | 184 | if (IS_ERR(cipher)) |
| 272 | return PTR_ERR(cipher); | 185 | return PTR_ERR(cipher); |
| 273 | 186 | ||
| 274 | switch(bs) { | ||
| 275 | case 16: | ||
| 276 | ctx->xor = xor_128; | ||
| 277 | break; | ||
| 278 | default: | ||
| 279 | return -EINVAL; | ||
| 280 | } | ||
| 281 | |||
| 282 | ctx->child = cipher; | 187 | ctx->child = cipher; |
| 283 | ctx->odds = (u8*)(ctx+1); | ||
| 284 | ctx->prev = ctx->odds + bs; | ||
| 285 | ctx->key = ctx->prev + bs; | ||
| 286 | 188 | ||
| 287 | return 0; | 189 | return 0; |
| 288 | }; | 190 | }; |
| 289 | 191 | ||
| 290 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) | 192 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) |
| 291 | { | 193 | { |
| 292 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 194 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
| 293 | crypto_free_cipher(ctx->child); | 195 | crypto_free_cipher(ctx->child); |
| 294 | } | 196 | } |
| 295 | 197 | ||
| 296 | static struct crypto_instance *xcbc_alloc(struct rtattr **tb) | 198 | static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 297 | { | 199 | { |
| 298 | struct crypto_instance *inst; | 200 | struct shash_instance *inst; |
| 299 | struct crypto_alg *alg; | 201 | struct crypto_alg *alg; |
| 202 | unsigned long alignmask; | ||
| 300 | int err; | 203 | int err; |
| 301 | 204 | ||
| 302 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 205 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
| 303 | if (err) | 206 | if (err) |
| 304 | return ERR_PTR(err); | 207 | return err; |
| 305 | 208 | ||
| 306 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 209 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
| 307 | CRYPTO_ALG_TYPE_MASK); | 210 | CRYPTO_ALG_TYPE_MASK); |
| 308 | if (IS_ERR(alg)) | 211 | if (IS_ERR(alg)) |
| 309 | return ERR_CAST(alg); | 212 | return PTR_ERR(alg); |
| 310 | 213 | ||
| 311 | switch(alg->cra_blocksize) { | 214 | switch(alg->cra_blocksize) { |
| 312 | case 16: | 215 | case 16: |
| 313 | break; | 216 | break; |
| 314 | default: | 217 | default: |
| 315 | inst = ERR_PTR(-EINVAL); | ||
| 316 | goto out_put_alg; | 218 | goto out_put_alg; |
| 317 | } | 219 | } |
| 318 | 220 | ||
| 319 | inst = crypto_alloc_instance("xcbc", alg); | 221 | inst = shash_alloc_instance("xcbc", alg); |
| 222 | err = PTR_ERR(inst); | ||
| 320 | if (IS_ERR(inst)) | 223 | if (IS_ERR(inst)) |
| 321 | goto out_put_alg; | 224 | goto out_put_alg; |
| 322 | 225 | ||
| 323 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 226 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
| 324 | inst->alg.cra_priority = alg->cra_priority; | 227 | shash_crypto_instance(inst), |
| 325 | inst->alg.cra_blocksize = alg->cra_blocksize; | 228 | CRYPTO_ALG_TYPE_MASK); |
| 326 | inst->alg.cra_alignmask = alg->cra_alignmask; | 229 | if (err) |
| 327 | inst->alg.cra_type = &crypto_hash_type; | 230 | goto out_free_inst; |
| 328 | 231 | ||
| 329 | inst->alg.cra_hash.digestsize = alg->cra_blocksize; | 232 | alignmask = alg->cra_alignmask | 3; |
| 330 | inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + | 233 | inst->alg.base.cra_alignmask = alignmask; |
| 331 | ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); | 234 | inst->alg.base.cra_priority = alg->cra_priority; |
| 332 | inst->alg.cra_init = xcbc_init_tfm; | 235 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
| 333 | inst->alg.cra_exit = xcbc_exit_tfm; | 236 | |
| 334 | 237 | inst->alg.digestsize = alg->cra_blocksize; | |
| 335 | inst->alg.cra_hash.init = crypto_xcbc_digest_init; | 238 | inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), |
| 336 | inst->alg.cra_hash.update = crypto_xcbc_digest_update; | 239 | crypto_tfm_ctx_alignment()) + |
| 337 | inst->alg.cra_hash.final = crypto_xcbc_digest_final; | 240 | (alignmask & |
| 338 | inst->alg.cra_hash.digest = crypto_xcbc_digest; | 241 | ~(crypto_tfm_ctx_alignment() - 1)) + |
| 339 | inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; | 242 | alg->cra_blocksize * 2; |
| 243 | |||
| 244 | inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), | ||
| 245 | alignmask + 1) + | ||
| 246 | alg->cra_blocksize * 2; | ||
| 247 | inst->alg.base.cra_init = xcbc_init_tfm; | ||
| 248 | inst->alg.base.cra_exit = xcbc_exit_tfm; | ||
| 249 | |||
| 250 | inst->alg.init = crypto_xcbc_digest_init; | ||
| 251 | inst->alg.update = crypto_xcbc_digest_update; | ||
| 252 | inst->alg.final = crypto_xcbc_digest_final; | ||
| 253 | inst->alg.setkey = crypto_xcbc_digest_setkey; | ||
| 254 | |||
| 255 | err = shash_register_instance(tmpl, inst); | ||
| 256 | if (err) { | ||
| 257 | out_free_inst: | ||
| 258 | shash_free_instance(shash_crypto_instance(inst)); | ||
| 259 | } | ||
| 340 | 260 | ||
| 341 | out_put_alg: | 261 | out_put_alg: |
| 342 | crypto_mod_put(alg); | 262 | crypto_mod_put(alg); |
| 343 | return inst; | 263 | return err; |
| 344 | } | ||
| 345 | |||
| 346 | static void xcbc_free(struct crypto_instance *inst) | ||
| 347 | { | ||
| 348 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
| 349 | kfree(inst); | ||
| 350 | } | 264 | } |
| 351 | 265 | ||
| 352 | static struct crypto_template crypto_xcbc_tmpl = { | 266 | static struct crypto_template crypto_xcbc_tmpl = { |
| 353 | .name = "xcbc", | 267 | .name = "xcbc", |
| 354 | .alloc = xcbc_alloc, | 268 | .create = xcbc_create, |
| 355 | .free = xcbc_free, | 269 | .free = shash_free_instance, |
| 356 | .module = THIS_MODULE, | 270 | .module = THIS_MODULE, |
| 357 | }; | 271 | }; |
| 358 | 272 | ||
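xcbc_create() above sizes the per-request descriptor (xcbc_desc_ctx plus the odds and prev blocks) and the per-tfm context (xcbc_tfm_ctx plus the two cached constants) so that the PTR_ALIGN()ed buffers always fit inside the allocation, and it forces the alignmask to at least 3 so crypto_xor() can operate on 32-bit words. The standalone sketch below reproduces the same arithmetic with stand-in values for the structure sizes and for crypto_tfm_ctx_alignment(); those numbers are illustrative, the real ones come from the kernel headers.

/*
 * Standalone sketch of the size arithmetic in xcbc_create() above:
 * how much room descsize and cra_ctxsize reserve so the aligned
 * "consts"/"odds"/"prev" buffers fit.  ALIGN_UP() and the header
 * sizes are reimplemented here for illustration only.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long cipher_alignmask = 0;	/* e.g. generic AES */
	unsigned long bs = 16;			/* AES block size */
	unsigned long ctx_align = sizeof(void *) * 2;	/* stand-in for
							   crypto_tfm_ctx_alignment() */
	unsigned long desc_hdr = sizeof(unsigned int);	/* xcbc_desc_ctx.len */
	unsigned long tfm_hdr = sizeof(void *);		/* xcbc_tfm_ctx.child */

	unsigned long alignmask = cipher_alignmask | 3;

	unsigned long descsize = ALIGN_UP(desc_hdr, ctx_align) +
				 (alignmask & ~(ctx_align - 1)) +
				 bs * 2;		/* odds + prev */
	unsigned long ctxsize = ALIGN_UP(tfm_hdr, alignmask + 1) +
				bs * 2;			/* the two constants */

	printf("descsize=%lu ctxsize=%lu\n", descsize, ctxsize);
	return 0;
}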
