Diffstat (limited to 'crypto')

 crypto/Kconfig                      |   9
 crypto/aead.c                       |   1
 crypto/ansi_cprng.c                 |  82
 crypto/async_tx/Kconfig             |   5
 crypto/async_tx/async_pq.c          |  74
 crypto/async_tx/async_raid6_recov.c | 100
 crypto/async_tx/async_xor.c         |  33
 crypto/cryptd.c                     |   7
 crypto/digest.c                     | 240
 crypto/gcm.c                        | 107
 crypto/hash.c                       | 183
 crypto/proc.c                       |  19
 crypto/testmgr.c                    |  11
 crypto/testmgr.h                    |  15
 14 files changed, 329 insertions(+), 557 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 26b5dd0cb564..81c185a6971f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -440,6 +440,15 @@ config CRYPTO_WP512
 	  See also:
 	  <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
 
+config CRYPTO_GHASH_CLMUL_NI_INTEL
+	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
+	depends on (X86 || UML_X86) && 64BIT
+	select CRYPTO_SHASH
+	select CRYPTO_CRYPTD
+	help
+	  GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+	  The implementation is accelerated by CLMUL-NI of Intel.
+
 comment "Ciphers"
 
 config CRYPTO_AES
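Note: the new entry builds an alternative provider for the existing "ghash" transform; callers keep using the generic algorithm name and the crypto core picks the highest-priority implementation. A minimal consumer sketch under that assumption (not part of this commit; error handling trimmed to essentials):

#include <crypto/hash.h>	/* crypto_alloc_shash(), crypto_shash_*() */
#include <linux/err.h>
#include <linux/slab.h>

static int ghash_digest_example(const u8 *key, const u8 *data,
				unsigned int len, u8 *out)
{
	/* The core resolves "ghash" to e.g. the CLMUL-NI provider when
	 * the CPU supports it; the caller never names the driver. */
	struct crypto_shash *tfm = crypto_alloc_shash("ghash", 0, 0);
	struct shash_desc *desc;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_setkey(tfm, key, 16);	/* GHASH keys are 16 bytes */
	if (!err)
		err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}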
diff --git a/crypto/aead.c b/crypto/aead.c
index d9aa733db164..0a55da70845e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 3aa6e3834bfe..2bc332142849 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
  * Returns DEFAULT_BLK_SZ bytes of random data per call
  * returns 0 if generation succeded, <0 if something went wrong
  */
-static int _get_more_prng_bytes(struct prng_context *ctx)
+static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
 {
 	int i;
 	unsigned char tmp[DEFAULT_BLK_SZ];
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
 	 */
 	if (!memcmp(ctx->rand_data, ctx->last_rand_data,
 			DEFAULT_BLK_SZ)) {
-		if (fips_enabled) {
+		if (cont_test) {
 			panic("cprng %p Failed repetition check!\n",
 				ctx);
 		}
@@ -185,16 +185,14 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
 }
 
 /* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
+static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
+			  int do_cont_test)
 {
 	unsigned char *ptr = buf;
 	unsigned int byte_count = (unsigned int)nbytes;
 	int err;
 
 
-	if (nbytes < 0)
-		return -EINVAL;
-
 	spin_lock_bh(&ctx->prng_lock);
 
 	err = -EINVAL;
@@ -220,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
 
 remainder:
 	if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
-		if (_get_more_prng_bytes(ctx) < 0) {
+		if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
 			memset(buf, 0, nbytes);
 			err = -EINVAL;
 			goto done;
@@ -247,7 +245,7 @@ empty_rbuf:
 	 */
 	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
 		if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
-			if (_get_more_prng_bytes(ctx) < 0) {
+			if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
 				memset(buf, 0, nbytes);
 				err = -EINVAL;
 				goto done;
@@ -356,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
 {
 	struct prng_context *prng = crypto_rng_ctx(tfm);
 
-	return get_prng_bytes(rdata, dlen, prng);
+	return get_prng_bytes(rdata, dlen, prng, 0);
 }
 
 /*
@@ -404,19 +402,79 @@ static struct crypto_alg rng_alg = {
 	}
 };
 
+#ifdef CONFIG_CRYPTO_FIPS
+static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
+				 unsigned int dlen)
+{
+	struct prng_context *prng = crypto_rng_ctx(tfm);
+
+	return get_prng_bytes(rdata, dlen, prng, 1);
+}
+
+static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+	u8 rdata[DEFAULT_BLK_SZ];
+	int rc;
+
+	struct prng_context *prng = crypto_rng_ctx(tfm);
+
+	rc = cprng_reset(tfm, seed, slen);
+
+	if (!rc)
+		goto out;
+
+	/* this primes our continuity test */
+	rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
+	prng->rand_data_valid = DEFAULT_BLK_SZ;
+
+out:
+	return rc;
+}
+
+static struct crypto_alg fips_rng_alg = {
+	.cra_name		= "fips(ansi_cprng)",
+	.cra_driver_name	= "fips_ansi_cprng",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_RNG,
+	.cra_ctxsize		= sizeof(struct prng_context),
+	.cra_type		= &crypto_rng_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(rng_alg.cra_list),
+	.cra_init		= cprng_init,
+	.cra_exit		= cprng_exit,
+	.cra_u			= {
+		.rng = {
+			.rng_make_random	= fips_cprng_get_random,
+			.rng_reset		= fips_cprng_reset,
+			.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
+		}
+	}
+};
+#endif
 
 /* Module initalization */
 static int __init prng_mod_init(void)
 {
-	if (fips_enabled)
-		rng_alg.cra_priority += 200;
+	int rc = 0;
 
-	return crypto_register_alg(&rng_alg);
+	rc = crypto_register_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+	if (rc)
+		goto out;
+
+	rc = crypto_register_alg(&fips_rng_alg);
+
+out:
+#endif
+	return rc;
 }
 
 static void __exit prng_mod_fini(void)
 {
	crypto_unregister_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+	crypto_unregister_alg(&fips_rng_alg);
+#endif
 	return;
 }
 
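Note: the net effect of this file's changes is that the repetition check inside _get_more_prng_bytes() now runs only when the caller requests it, and the new fips(ansi_cprng) algorithm requests it on every call after priming one block at reset time. An illustrative standalone sketch of that FIPS 140-2 style continuous test (hypothetical helper, not code from this patch):

#include <linux/string.h>

#define BLK_SZ 16

struct cont_test_state {
	unsigned char last[BLK_SZ];
	int primed;	/* set once a block has been generated and recorded */
};

/* Returns 0 if the new block passes, -1 on a stuck-output failure;
 * the in-kernel driver panics at this point when cont_test is set. */
static int continuity_check(struct cont_test_state *st,
			    const unsigned char *blk)
{
	if (st->primed && memcmp(st->last, blk, BLK_SZ) == 0)
		return -1;
	memcpy(st->last, blk, BLK_SZ);
	st->primed = 1;
	return 0;
}

This is why fips_cprng_reset() generates and discards one block with the test disabled: it seeds `last` so the very first user-visible block has something to be compared against.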
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_CORE
 	select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+	bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+	bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
 */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_quiesce(&submit->depend_tx);
 
 	if (!P(blocks, disks)) {
-		P(blocks, disks) = scribble;
+		P(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	if (!Q(blocks, disks)) {
-		Q(blocks, disks) = scribble;
+		Q(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+				     disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,13 +270,13 @@
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
 		   struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-						      NULL, 0, blocks, disks,
-						      len);
+	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -285,22 +295,32 @@
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +393,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +405,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
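Note: with the raid6_empty_zero_page sentinel gone, callers mark an all-zero source by storing NULL in blocks[]; the synchronous path substitutes the zero page while the DMA path drops the source entirely. A hedged caller sketch for a hypothetical 6-disk stripe (page and scribble arguments are placeholders):

#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
gen_syndrome_example(struct page *data0, struct page *data1,
		     struct page *data3, struct page *p_page,
		     struct page *q_page, addr_conv_t *scribble)
{
	struct page *blocks[6];		/* 4 data slots + P + Q */
	struct async_submit_ctl submit;

	blocks[0] = data0;
	blocks[1] = data1;
	blocks[2] = NULL;	/* known-zero source: omitted on the DMA
				 * path, zero page in the sync path */
	blocks[3] = data3;
	blocks[4] = p_page;	/* P @ disks-2 */
	blocks[5] = q_page;	/* Q @ disks-1 */

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}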
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 
 	a = blocks[faila];
 	b = blocks[failb];
@@ -170,8 +170,8 @@
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
@@ -414,14 +447,13 @@
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
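Note: the dispatch in async_raid6_2data_recov() now keys on how many data sources are actually present rather than on the nominal disk count, so a wide but mostly-NULL (degraded or sparse) array still takes the cheap special cases. An illustrative-only restatement of that rule, not code from the patch:

/* What matters is the count of non-NULL data sources, capped at 4
 * because anything above 3 falls through to the general path. */
static int recov_strategy(struct page **blocks, int disks)
{
	int non_zero_srcs = 0, i;

	for (i = 0; i < disks - 2 && non_zero_srcs < 4; i++)
		if (blocks[i])
			non_zero_srcs++;

	/* 2 present sources are just the two failed disks themselves:
	 * the "4 disk" case, however wide the array is declared;
	 * 3 maps to the "5 disk" case; more takes the generic path. */
	return non_zero_srcs;
}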
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
@@ -230,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 			memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+	     struct page **src_list, int src_cnt, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+				     src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -251,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	      int src_cnt, size_t len, enum sum_check_flags *result,
 	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-						      &dest, 1, src_list,
-						      src_cnt, len);
+	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_addr_t *dma_src = NULL;
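Note: both xor paths above adopt the same compaction idiom: skip NULL (known-zero) sources while packing the working array, then continue with the reduced count. The idiom in isolation (hypothetical helper, shown only to name the pattern):

/* Walk a sparse source list and pack only the present entries, so
 * the actual xor operation sees a dense array and a smaller count. */
static int pack_present_sources(struct page **src_list, int src_cnt,
				struct page **packed)
{
	int i, n = 0;

	for (i = 0; i < src_cnt; i++)
		if (src_list[i])	/* NULL == known-zero source, skip */
			packed[n++] = src_list[i];

	return n;	/* callers proceed with the reduced count */
}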
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 35335825a4ef..f8ae0d94a647 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -711,6 +711,13 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_ahash_child);
 
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
+
 void cryptd_free_ahash(struct cryptd_ahash *tfm)
 {
 	crypto_free_ahash(&tfm->base);
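Note: the new export lets a cryptd-backed ahash driver reach the shash descriptor stored in the request context and drive the synchronous child transform directly. A hedged usage sketch; the wrapper function and its child argument are hypothetical, only cryptd_shash_desc() and crypto_shash_init() come from the tree:

#include <crypto/cryptd.h>
#include <crypto/internal/hash.h>

static int example_ahash_init(struct ahash_request *req,
			      struct crypto_shash *child)
{
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = child;		/* the synchronous child transform */
	desc->flags = req->base.flags;
	return crypto_shash_init(desc);
}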
diff --git a/crypto/digest.c b/crypto/digest.c
deleted file mode 100644
index 5d3f1303da98..000000000000
--- a/crypto/digest.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-
-#include "internal.h"
-
-static int init(struct hash_desc *desc)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-
-	tfm->__crt_alg->cra_digest.dia_init(tfm);
-	return 0;
-}
-
-static int update2(struct hash_desc *desc,
-		   struct scatterlist *sg, unsigned int nbytes)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
-
-	if (!nbytes)
-		return 0;
-
-	for (;;) {
-		struct page *pg = sg_page(sg);
-		unsigned int offset = sg->offset;
-		unsigned int l = sg->length;
-
-		if (unlikely(l > nbytes))
-			l = nbytes;
-		nbytes -= l;
-
-		do {
-			unsigned int bytes_from_page = min(l, ((unsigned int)
-							   (PAGE_SIZE)) -
-							   offset);
-			char *src = crypto_kmap(pg, 0);
-			char *p = src + offset;
-
-			if (unlikely(offset & alignmask)) {
-				unsigned int bytes =
-					alignmask + 1 - (offset & alignmask);
-				bytes = min(bytes, bytes_from_page);
-				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
-								      bytes);
-				p += bytes;
-				bytes_from_page -= bytes;
-				l -= bytes;
-			}
-			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
-							      bytes_from_page);
-			crypto_kunmap(src, 0);
-			crypto_yield(desc->flags);
-			offset = 0;
-			pg++;
-			l -= bytes_from_page;
-		} while (l > 0);
-
-		if (!nbytes)
-			break;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	return 0;
-}
-
-static int update(struct hash_desc *desc,
-		  struct scatterlist *sg, unsigned int nbytes)
-{
-	if (WARN_ON_ONCE(in_irq()))
-		return -EDEADLK;
-	return update2(desc, sg, nbytes);
-}
-
-static int final(struct hash_desc *desc, u8 *out)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
-	struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
-
-	if (unlikely((unsigned long)out & alignmask)) {
-		unsigned long align = alignmask + 1;
-		unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
-		u8 *dst = (u8 *)ALIGN(addr, align) +
-			  ALIGN(tfm->__crt_alg->cra_ctxsize, align);
-
-		digest->dia_final(tfm, dst);
-		memcpy(out, dst, digest->dia_digestsize);
-	} else
-		digest->dia_final(tfm, out);
-
-	return 0;
-}
-
-static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
-{
-	crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
-	return -ENOSYS;
-}
-
-static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(hash);
-
-	crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
-	return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
-}
-
-static int digest(struct hash_desc *desc,
-		  struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
-	if (WARN_ON_ONCE(in_irq()))
-		return -EDEADLK;
-
-	init(desc);
-	update2(desc, sg, nbytes);
-	return final(desc, out);
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
-	struct hash_tfm *ops = &tfm->crt_hash;
-	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
-	if (dalg->dia_digestsize > PAGE_SIZE / 8)
-		return -EINVAL;
-
-	ops->init = init;
-	ops->update = update;
-	ops->final = final;
-	ops->digest = digest;
-	ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
-	ops->digestsize = dalg->dia_digestsize;
-
-	return 0;
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
-}
-
-static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
-			unsigned int keylen)
-{
-	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
-	return -ENOSYS;
-}
-
-static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
-			unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
-	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
-	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
-	return dalg->dia_setkey(tfm, key, keylen);
-}
-
-static int digest_async_init(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
-	dalg->dia_init(tfm);
-	return 0;
-}
-
-static int digest_async_update(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	update(&desc, req->src, req->nbytes);
-	return 0;
-}
-
-static int digest_async_final(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	final(&desc, req->result);
-	return 0;
-}
-
-static int digest_async_digest(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	return digest(&desc, req->src, req->nbytes, req->result);
-}
-
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
-{
-	struct ahash_tfm *crt = &tfm->crt_ahash;
-	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
-	if (dalg->dia_digestsize > PAGE_SIZE / 8)
-		return -EINVAL;
-
-	crt->init = digest_async_init;
-	crt->update = digest_async_update;
-	crt->final = digest_async_final;
-	crt->digest = digest_async_digest;
-	crt->setkey = dalg->dia_setkey ? digest_async_setkey :
-		digest_async_nosetkey;
-	crt->digestsize = dalg->dia_digestsize;
-
-	return 0;
-}
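Note: the removed file implemented the legacy digest/hash ops glue; its role is taken over by the shash interface. A minimal sketch of the equivalent modern streaming call sequence ("sha1" chosen arbitrarily; not part of this commit):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int shash_stream_example(const u8 *part1, unsigned int len1,
				const u8 *part2, unsigned int len2, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	struct shash_desc *desc;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* shash descriptors carry per-request state, like the old
	 * hash_desc did, but are sized per algorithm */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, part1, len1);
	if (!err)
		err = crypto_shash_update(desc, part2, len2);
	if (!err)
		err = crypto_shash_final(desc, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}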
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..c6547130624c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
-	crypto_completion_t complete;
+	void (*complete)(struct aead_request *req, int err);
 };
 
 struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
 	return crypto_ahash_final(ahreq);
 }
 
-static void gcm_hash_final_done(struct crypto_async_request *areq,
-				int err)
+static void __gcm_hash_final_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 
 	if (!err)
 		crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
 
-	gctx->complete(areq, err);
+	gctx->complete(req, err);
 }
 
-static void gcm_hash_len_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_final_done(areq, err);
+	__gcm_hash_final_done(req, err);
 }
 
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_len_done(areq, err);
+	__gcm_hash_len_done(req, err);
 }
 
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
 				int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_crypt_remain_done(areq, err);
+	__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_crypt_done(areq, err);
+		__gcm_hash_crypt_done(req, err);
 	else
-		gcm_hash_crypt_remain_done(areq, err);
+		__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
-				int err)
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+				       int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	unsigned int remain;
 
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_assoc_remain_done(areq, err);
+	__gcm_hash_assoc_remain_done(req, err);
 }
 
-static void gcm_hash_init_done(struct crypto_async_request *areq,
-			       int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	crypto_completion_t complete;
 	unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_assoc_done(areq, err);
+		__gcm_hash_assoc_done(req, err);
 	else
-		gcm_hash_assoc_remain_done(areq, err);
+		__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req = areq->data;
+
+	__gcm_hash_init_done(req, err);
 }
 
 static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
 			 crypto_aead_authsize(aead), 1);
 }
 
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
 	aead_request_complete(req, err);
 }
 
-static void gcm_encrypt_done(struct crypto_async_request *areq,
-			     int err)
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
 		err = gcm_hash(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err) {
+			crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+			gcm_enc_copy_hash(req, pctx);
+		}
 	}
 
-	gcm_enc_hash_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
 	aead_request_complete(req, err);
 }
 
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct ablkcipher_request *abreq = &pctx->u.abreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
 		err = crypto_ablkcipher_decrypt(abreq);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err)
+			err = crypto_gcm_verify(req, pctx);
 	}
 
-	gcm_decrypt_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_decrypt(struct aead_request *req)
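Note: every completion handler in this file now follows one refactoring pattern: an __-prefixed worker that takes the aead_request directly, plus a thin adapter matching the crypto_async_request callback signature. Synchronous return paths can then call the worker without fabricating an async request. The skeleton of the pattern, with hypothetical names:

/* Worker: operates on the aead_request itself, callable from both
 * the synchronous return path and the async completion path. */
static void __example_step_done(struct aead_request *req, int err)
{
	/* ... per-step bookkeeping on crypto_gcm_reqctx(req) ... */
	aead_request_complete(req, err);
}

/* Thin adapter retained only for the truly asynchronous path, where
 * the crypto core hands back a crypto_async_request. */
static void example_step_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;

	__example_step_done(req, err);
}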
diff --git a/crypto/hash.c b/crypto/hash.c
deleted file mode 100644
index cb86b19fd105..000000000000
--- a/crypto/hash.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Cryptographic Hash operations.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#include "internal.h"
-
-static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
-					u32 mask)
-{
-	return alg->cra_ctxsize;
-}
-
-static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key,
-				 unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(crt);
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-	unsigned long alignmask = crypto_hash_alignmask(crt);
-	int ret;
-	u8 *buffer, *alignbuffer;
-	unsigned long absize;
-
-	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_ATOMIC);
-	if (!buffer)
-		return -ENOMEM;
-
-	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
-	memcpy(alignbuffer, key, keylen);
-	ret = alg->setkey(crt, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
-	return ret;
-}
-
-static int hash_setkey(struct crypto_hash *crt, const u8 *key,
-		       unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_hash_tfm(crt);
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-	unsigned long alignmask = crypto_hash_alignmask(crt);
-
-	if ((unsigned long)key & alignmask)
-		return hash_setkey_unaligned(crt, key, keylen);
-
-	return alg->setkey(crt, key, keylen);
-}
-
-static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
-	struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
-	return alg->setkey(tfm_hash, key, keylen);
-}
-
-static int hash_async_init(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	return alg->init(&desc);
-}
-
-static int hash_async_update(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	return alg->update(&desc, req->src, req->nbytes);
-}
-
-static int hash_async_final(struct ahash_request *req)
-{
-	struct crypto_tfm *tfm = req->base.tfm;
-	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-	struct hash_desc desc = {
-		.tfm = __crypto_hash_cast(tfm),
-		.flags = req->base.flags,
-	};
-
-	return alg->final(&desc, req->result);
| 107 | } | ||
| 108 | |||
| 109 | static int hash_async_digest(struct ahash_request *req) | ||
| 110 | { | ||
| 111 | struct crypto_tfm *tfm = req->base.tfm; | ||
| 112 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
| 113 | struct hash_desc desc = { | ||
| 114 | .tfm = __crypto_hash_cast(tfm), | ||
| 115 | .flags = req->base.flags, | ||
| 116 | }; | ||
| 117 | |||
| 118 | return alg->digest(&desc, req->src, req->nbytes, req->result); | ||
| 119 | } | ||
| 120 | |||
| 121 | static int crypto_init_hash_ops_async(struct crypto_tfm *tfm) | ||
| 122 | { | ||
| 123 | struct ahash_tfm *crt = &tfm->crt_ahash; | ||
| 124 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
| 125 | |||
| 126 | crt->init = hash_async_init; | ||
| 127 | crt->update = hash_async_update; | ||
| 128 | crt->final = hash_async_final; | ||
| 129 | crt->digest = hash_async_digest; | ||
| 130 | crt->setkey = hash_async_setkey; | ||
| 131 | crt->digestsize = alg->digestsize; | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm) | ||
| 137 | { | ||
| 138 | struct hash_tfm *crt = &tfm->crt_hash; | ||
| 139 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
| 140 | |||
| 141 | crt->init = alg->init; | ||
| 142 | crt->update = alg->update; | ||
| 143 | crt->final = alg->final; | ||
| 144 | crt->digest = alg->digest; | ||
| 145 | crt->setkey = hash_setkey; | ||
| 146 | crt->digestsize = alg->digestsize; | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | ||
| 152 | { | ||
| 153 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
| 154 | |||
| 155 | if (alg->digestsize > PAGE_SIZE / 8) | ||
| 156 | return -EINVAL; | ||
| 157 | |||
| 158 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK) | ||
| 159 | return crypto_init_hash_ops_async(tfm); | ||
| 160 | else | ||
| 161 | return crypto_init_hash_ops_sync(tfm); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 165 | __attribute__ ((unused)); | ||
| 166 | static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 167 | { | ||
| 168 | seq_printf(m, "type : hash\n"); | ||
| 169 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
| 170 | seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); | ||
| 171 | } | ||
| 172 | |||
| 173 | const struct crypto_type crypto_hash_type = { | ||
| 174 | .ctxsize = crypto_hash_ctxsize, | ||
| 175 | .init = crypto_init_hash_ops, | ||
| 176 | #ifdef CONFIG_PROC_FS | ||
| 177 | .show = crypto_hash_show, | ||
| 178 | #endif | ||
| 179 | }; | ||
| 180 | EXPORT_SYMBOL_GPL(crypto_hash_type); | ||
| 181 | |||
| 182 | MODULE_LICENSE("GPL"); | ||
| 183 | MODULE_DESCRIPTION("Generic cryptographic hash type"); | ||
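crypto/hash.c is removed outright: the legacy crypto_hash type and its ahash adapters are superseded by the shash/ahash interfaces in crypto/shash.c and crypto/ahash.c, which also absorb the unaligned-key copying that hash_setkey_unaligned() did by hand. For reference, a hedged sketch of the shash equivalent of the old init/update/final flow; my_shash_digest() is a made-up helper, and the desc->flags assignment assumes a kernel of this vintage where shash_desc still carries a flags field:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

static int my_shash_digest(const char *alg, const u8 *data,
			   unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Per-algorithm operational state lives behind the descriptor;
	 * this replaces the old on-stack struct hash_desc. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, data, len) ?:
	      crypto_shash_final(desc, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}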
diff --git a/crypto/proc.c b/crypto/proc.c index 5dc07e442fca..58fef67d4f4d 100644 --- a/crypto/proc.c +++ b/crypto/proc.c | |||
| @@ -25,28 +25,22 @@ | |||
| 25 | #ifdef CONFIG_CRYPTO_FIPS | 25 | #ifdef CONFIG_CRYPTO_FIPS |
| 26 | static struct ctl_table crypto_sysctl_table[] = { | 26 | static struct ctl_table crypto_sysctl_table[] = { |
| 27 | { | 27 | { |
| 28 | .ctl_name = CTL_UNNUMBERED, | ||
| 29 | .procname = "fips_enabled", | 28 | .procname = "fips_enabled", |
| 30 | .data = &fips_enabled, | 29 | .data = &fips_enabled, |
| 31 | .maxlen = sizeof(int), | 30 | .maxlen = sizeof(int), |
| 32 | .mode = 0444, | 31 | .mode = 0444, |
| 33 | .proc_handler = &proc_dointvec | 32 | .proc_handler = proc_dointvec |
| 34 | }, | ||
| 35 | { | ||
| 36 | .ctl_name = 0, | ||
| 37 | }, | 33 | }, |
| 34 | {} | ||
| 38 | }; | 35 | }; |
| 39 | 36 | ||
| 40 | static struct ctl_table crypto_dir_table[] = { | 37 | static struct ctl_table crypto_dir_table[] = { |
| 41 | { | 38 | { |
| 42 | .ctl_name = CTL_UNNUMBERED, | ||
| 43 | .procname = "crypto", | 39 | .procname = "crypto", |
| 44 | .mode = 0555, | 40 | .mode = 0555, |
| 45 | .child = crypto_sysctl_table | 41 | .child = crypto_sysctl_table |
| 46 | }, | 42 | }, |
| 47 | { | 43 | {} |
| 48 | .ctl_name = 0, | ||
| 49 | }, | ||
| 50 | }; | 44 | }; |
| 51 | 45 | ||
| 52 | static struct ctl_table_header *crypto_sysctls; | 46 | static struct ctl_table_header *crypto_sysctls; |
| @@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p) | |||
| 115 | seq_printf(m, "max keysize : %u\n", | 109 | seq_printf(m, "max keysize : %u\n", |
| 116 | alg->cra_cipher.cia_max_keysize); | 110 | alg->cra_cipher.cia_max_keysize); |
| 117 | break; | 111 | break; |
| 118 | |||
| 119 | case CRYPTO_ALG_TYPE_DIGEST: | ||
| 120 | seq_printf(m, "type : digest\n"); | ||
| 121 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
| 122 | seq_printf(m, "digestsize : %u\n", | ||
| 123 | alg->cra_digest.dia_digestsize); | ||
| 124 | break; | ||
| 125 | case CRYPTO_ALG_TYPE_COMPRESS: | 112 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 126 | seq_printf(m, "type : compression\n"); | 113 | seq_printf(m, "type : compression\n"); |
| 127 | break; | 114 | break; |
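Besides dropping the dead CRYPTO_ALG_TYPE_DIGEST case from c_show(), the proc.c hunks track the sysctl API cleanup: .ctl_name is gone, handlers are plain function pointers, and tables end with an empty-brace sentinel rather than an explicit .ctl_name = 0 entry. The same shape for an arbitrary read-only knob (all my_* names are made up):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int my_flag;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_flag",
		.data		= &my_flag,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec
	},
	{}	/* empty sentinel terminates the table */
};

static struct ctl_table my_dir_table[] = {
	{
		.procname	= "my_dir",
		.mode		= 0555,
		.child		= my_table
	},
	{}
};

static struct ctl_table_header *my_sysctls;

static int __init my_sysctl_init(void)
{
	my_sysctls = register_sysctl_table(my_dir_table);
	return my_sysctls ? 0 : -ENOMEM;
}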
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6d5b746637be..7620bfce92f2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -1201,7 +1201,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | |||
| 1201 | unsigned int tcount) | 1201 | unsigned int tcount) |
| 1202 | { | 1202 | { |
| 1203 | const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); | 1203 | const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); |
| 1204 | int err, i, j, seedsize; | 1204 | int err = 0, i, j, seedsize; |
| 1205 | u8 *seed; | 1205 | u8 *seed; |
| 1206 | char result[32]; | 1206 | char result[32]; |
| 1207 | 1207 | ||
| @@ -1943,6 +1943,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1943 | } | 1943 | } |
| 1944 | } | 1944 | } |
| 1945 | }, { | 1945 | }, { |
| 1946 | .alg = "ghash", | ||
| 1947 | .test = alg_test_hash, | ||
| 1948 | .suite = { | ||
| 1949 | .hash = { | ||
| 1950 | .vecs = ghash_tv_template, | ||
| 1951 | .count = GHASH_TEST_VECTORS | ||
| 1952 | } | ||
| 1953 | } | ||
| 1954 | }, { | ||
| 1946 | .alg = "hmac(md5)", | 1955 | .alg = "hmac(md5)", |
| 1947 | .test = alg_test_hash, | 1956 | .test = alg_test_hash, |
| 1948 | .suite = { | 1957 | .suite = { |
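The new "ghash" descriptor lands between "gcm(aes)" and "hmac(md5)" because alg_test_descs[] must stay sorted by .alg: the test manager locates entries by binary search, roughly as below. This is a from-memory simplification of testmgr's lookup, assuming the surrounding alg_test_descs[] definition, not a verbatim copy:

#include <linux/kernel.h>
#include <linux/string.h>

static int my_find_test(const char *alg)
{
	int start = 0;
	int end = ARRAY_SIZE(alg_test_descs);

	while (start < end) {
		int i = (start + end) / 2;
		int diff = strcmp(alg_test_descs[i].alg, alg);

		if (diff > 0)
			end = i;		/* probe sorts after alg */
		else if (diff < 0)
			start = i + 1;		/* probe sorts before alg */
		else
			return i;
	}

	return -1;
}

An entry inserted out of order is effectively invisible to this search, so the algorithm would be treated as having no test at all.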
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 9963b18983ab..fb765173d41c 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = { | |||
| 1003 | }, | 1003 | }, |
| 1004 | }; | 1004 | }; |
| 1005 | 1005 | ||
| 1006 | #define GHASH_TEST_VECTORS 1 | ||
| 1007 | |||
| 1008 | static struct hash_testvec ghash_tv_template[] = | ||
| 1009 | { | ||
| 1010 | { | ||
| 1011 | |||
| 1012 | .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61", | ||
| 1013 | .ksize = 16, | ||
| 1014 | .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", | ||
| 1015 | .psize = 16, | ||
| 1016 | .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" | ||
| 1017 | "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", | ||
| 1018 | }, | ||
| 1019 | }; | ||
| 1020 | |||
| 1006 | /* | 1021 | /* |
| 1007 | * HMAC-MD5 test vectors from RFC2202 | 1022 | * HMAC-MD5 test vectors from RFC2202 |
| 1008 | * (These need to be fixed to not use strlen). | 1023 | * (These need to be fixed to not use strlen). |
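Two notes on the vector above. First, the seventh plaintext byte is spelled "\x04a": C hex escapes greedily consume every following hex digit, so that is the single byte 0x4a (not \x04 followed by 'a'), and the string counts out to exactly 16 bytes, matching .psize. Second, the same data can be replayed by hand through whichever ghash provider loads (ghash-generic, or the cryptd-wrapped CLMUL-NI driver this series adds). A hedged sketch, again assuming this era's shash_desc with a flags field; my_ghash_selftest() is not an in-tree function:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int __init my_ghash_selftest(void)
{
	/* Key, plaintext and digest from ghash_tv_template[0] above;
	 * plaintext byte 7 written as \x4a, the byte "\x04a" parses to. */
	static const u8 key[] =
		"\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
		"\xff\xca\xff\x95\xf8\x30\xf0\x61";
	static const u8 plain[] =
		"\x95\x2b\x2a\x56\xa5\x60\x4a\xc0"
		"\xb3\x2b\x66\x56\xa0\x5b\x40\xb6";
	static const u8 want[] =
		"\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
		"\x4f\xc4\x80\x2c\xc3\xfe\xda\x60";
	u8 out[16];
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (err)
		goto out_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, plain, 16, out);
	if (!err && memcmp(out, want, 16))
		err = -EINVAL;	/* digest mismatch */

	kfree(desc);
out_tfm:
	crypto_free_shash(tfm);
	return err;
}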
