Diffstat (limited to 'drivers/md/dm-crypt.c')
 drivers/md/dm-crypt.c | 193 ++++++++++++++++++++++++++++----------------------
 1 file changed, 114 insertions(+), 79 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 64fee90bb68b..ac54f697c508 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -36,7 +36,6 @@ struct dm_crypt_io {
 	struct work_struct work;
 	atomic_t pending;
 	int error;
-	int post_process;
 };
 
 /*
@@ -57,7 +56,7 @@ struct crypt_config;
 
 struct crypt_iv_operations {
 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
-	           const char *opts);
+		   const char *opts);
 	void (*dtr)(struct crypt_config *cc);
 	const char *(*status)(struct crypt_config *cc);
 	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
@@ -80,6 +79,8 @@ struct crypt_config {
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
+	struct workqueue_struct *io_queue;
+	struct workqueue_struct *crypt_queue;
 	/*
 	 * crypto related data
 	 */
@@ -112,7 +113,7 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
  * Different IV generation algorithms:
  *
  * plain: the initial vector is the 32-bit little-endian version of the sector
- *        number, padded with zeros if neccessary.
+ *        number, padded with zeros if necessary.
  *
  * essiv: "encrypted sector|salt initial vector", the sector number is
  *        encrypted with the bulk cipher using a salt as key. The salt
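For concreteness, the "plain" scheme described in this comment amounts to a generator like the following. This is a sketch consistent with the comment, not necessarily the exact function in this file; the name plain_iv_sketch is illustrative.

	/* Sketch: zero the IV buffer, then store the low 32 bits of the
	 * sector number in little-endian byte order. */
	static int plain_iv_sketch(struct crypt_config *cc, u8 *iv, sector_t sector)
	{
		memset(iv, 0, cc->iv_size);	/* pad with zeros */
		*(__le32 *)iv = cpu_to_le32(sector & 0xffffffff);
		return 0;
	}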
@@ -137,7 +138,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-	                      const char *opts)
+			      const char *opts)
 {
 	struct crypto_cipher *essiv_tfm;
 	struct crypto_hash *hash_tfm;
@@ -175,6 +176,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 	if (err) {
 		ti->error = "Error calculating hash in ESSIV";
+		kfree(salt);
 		return err;
 	}
 
@@ -188,7 +190,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	if (crypto_cipher_blocksize(essiv_tfm) !=
 	    crypto_blkcipher_ivsize(cc->tfm)) {
 		ti->error = "Block size of ESSIV cipher does "
-		            "not match IV size of block cipher";
+			    "not match IV size of block cipher";
 		crypto_free_cipher(essiv_tfm);
 		kfree(salt);
 		return -EINVAL;
@@ -319,10 +321,10 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
 	return r;
 }
 
-static void
-crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
-		   struct bio *bio_out, struct bio *bio_in,
-		   sector_t sector, int write)
+static void crypt_convert_init(struct crypt_config *cc,
+			       struct convert_context *ctx,
+			       struct bio *bio_out, struct bio *bio_in,
+			       sector_t sector, int write)
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
@@ -338,7 +340,7 @@ crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static int crypt_convert(struct crypt_config *cc,
-                         struct convert_context *ctx)
+			 struct convert_context *ctx)
 {
 	int r = 0;
 
@@ -346,16 +348,17 @@ static int crypt_convert(struct crypt_config *cc,
 	       ctx->idx_out < ctx->bio_out->bi_vcnt) {
 		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
 		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-		struct scatterlist sg_in = {
-			.page = bv_in->bv_page,
-			.offset = bv_in->bv_offset + ctx->offset_in,
-			.length = 1 << SECTOR_SHIFT
-		};
-		struct scatterlist sg_out = {
-			.page = bv_out->bv_page,
-			.offset = bv_out->bv_offset + ctx->offset_out,
-			.length = 1 << SECTOR_SHIFT
-		};
+		struct scatterlist sg_in, sg_out;
+
+		sg_init_table(&sg_in, 1);
+		sg_set_page(&sg_in, bv_in->bv_page);
+		sg_in.offset = bv_in->bv_offset + ctx->offset_in;
+		sg_in.length = 1 << SECTOR_SHIFT;
+
+		sg_init_table(&sg_out, 1);
+		sg_set_page(&sg_out, bv_out->bv_page);
+		sg_out.offset = bv_out->bv_offset + ctx->offset_out;
+		sg_out.length = 1 << SECTOR_SHIFT;
 
 		ctx->offset_in += sg_in.length;
 		if (ctx->offset_in >= bv_in->bv_len) {
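This hunk tracks the scatterlist API of this kernel generation: entries must be initialized through sg_init_table(), which sets the end-of-table marker (and debug magic under CONFIG_DEBUG_SG), rather than being built by hand with designated initializers. A minimal stand-alone sketch of the pattern, using the two-argument sg_set_page() form seen in this patch (later kernels take length and offset as extra arguments) and hypothetical page/offset variables:

	/* Map one 512-byte sector of a page through a one-entry sg table. */
	struct scatterlist sg;

	sg_init_table(&sg, 1);		/* mark the (single) end entry */
	sg_set_page(&sg, page);		/* attach the page (era-specific form) */
	sg.offset = offset_in_page;	/* byte offset of the sector in the page */
	sg.length = 1 << SECTOR_SHIFT;	/* SECTOR_SHIFT is 9, i.e. 512 bytes */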
@@ -370,7 +373,7 @@ static int crypt_convert(struct crypt_config *cc,
 		}
 
 		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
-		                              ctx->write, ctx->sector);
+					      ctx->write, ctx->sector);
 		if (r < 0)
 			break;
 
@@ -380,13 +383,13 @@ static int crypt_convert(struct crypt_config *cc,
 	return r;
 }
 
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
-        struct dm_crypt_io *io = bio->bi_private;
-        struct crypt_config *cc = io->target->private;
+	struct dm_crypt_io *io = bio->bi_private;
+	struct crypt_config *cc = io->target->private;
 
-        bio_free(bio, cc->bs);
+	bio_free(bio, cc->bs);
 }
 
 /*
  * Generate a new unfragmented bio with the given size
@@ -458,7 +461,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
  */
-static void dec_pending(struct dm_crypt_io *io, int error)
+static void crypt_dec_pending(struct dm_crypt_io *io, int error)
 {
 	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
@@ -474,18 +477,36 @@ static void dec_pending(struct dm_crypt_io *io, int error)
 }
 
 /*
- * kcryptd:
+ * kcryptd/kcryptd_io:
  *
  * Needed because it would be very unwise to do decryption in an
  * interrupt context.
+ *
+ * kcryptd performs the actual encryption or decryption.
+ *
+ * kcryptd_io performs the IO submission.
+ *
+ * They must be separated as otherwise the final stages could be
+ * starved by new requests which can block in the first stages due
+ * to memory allocation.
  */
-static struct workqueue_struct *_kcryptd_workqueue;
 static void kcryptd_do_work(struct work_struct *work);
+static void kcryptd_do_crypt(struct work_struct *work);
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
+	struct crypt_config *cc = io->target->private;
+
 	INIT_WORK(&io->work, kcryptd_do_work);
-	queue_work(_kcryptd_workqueue, &io->work);
+	queue_work(cc->io_queue, &io->work);
+}
+
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_do_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 static void crypt_endio(struct bio *clone, int error)
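The comment above is the heart of this patch: one per-device queue for I/O submission, another for the cipher work, so the final stage can never be starved by a flood of new requests blocking on memory allocation. Stripped of dm-crypt specifics, the shape of the pattern is roughly the following sketch (hypothetical names; the queues would be created with create_singlethread_workqueue(), per device in dm-crypt):

	static struct workqueue_struct *stage1_wq, *stage2_wq;

	struct my_io {
		struct work_struct work;
		/* ... per-request state ... */
	};

	static void stage2_fn(struct work_struct *work)
	{
		struct my_io *io = container_of(work, struct my_io, work);
		/* final stage: runs on its own queue, so it is never
		 * stuck behind newly submitted first-stage items */
	}

	static void stage1_fn(struct work_struct *work)
	{
		struct my_io *io = container_of(work, struct my_io, work);

		/* first stage: may block (memory allocation, I/O
		 * submission), then hands the item to the second queue */
		INIT_WORK(&io->work, stage2_fn);
		queue_work(stage2_wq, &io->work);
	}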
@@ -508,13 +529,12 @@ static void crypt_endio(struct bio *clone, int error)
 	}
 
 	bio_put(clone);
-	io->post_process = 1;
-	kcryptd_queue_io(io);
+	kcryptd_queue_crypt(io);
 	return;
 
 out:
 	bio_put(clone);
-	dec_pending(io, error);
+	crypt_dec_pending(io, error);
 }
 
 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
@@ -544,7 +564,7 @@ static void process_read(struct dm_crypt_io *io)
 	 */
 	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
 	if (unlikely(!clone)) {
-		dec_pending(io, -ENOMEM);
+		crypt_dec_pending(io, -ENOMEM);
 		return;
 	}
 
@@ -579,7 +599,7 @@ static void process_write(struct dm_crypt_io *io)
 	while (remaining) {
 		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
-			dec_pending(io, -ENOMEM);
+			crypt_dec_pending(io, -ENOMEM);
 			return;
 		}
 
@@ -589,7 +609,7 @@ static void process_write(struct dm_crypt_io *io)
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
-			dec_pending(io, -EIO);
+			crypt_dec_pending(io, -EIO);
 			return;
 		}
 
@@ -624,17 +644,23 @@ static void process_read_endio(struct dm_crypt_io *io)
 	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
 			   io->base_bio->bi_sector - io->target->begin, 0);
 
-	dec_pending(io, crypt_convert(cc, &ctx));
+	crypt_dec_pending(io, crypt_convert(cc, &ctx));
 }
 
 static void kcryptd_do_work(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (io->post_process)
-		process_read_endio(io);
-	else if (bio_data_dir(io->base_bio) == READ)
+	if (bio_data_dir(io->base_bio) == READ)
 		process_read(io);
+}
+
+static void kcryptd_do_crypt(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		process_read_endio(io);
 	else
 		process_write(io);
 }
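Both worker functions recover their dm_crypt_io from the embedded work_struct via container_of(). For readers new to the idiom, here is a self-contained userspace demonstration; the struct names are hypothetical stand-ins for work_struct and dm_crypt_io, and the macro mirrors the kernel's definition in spirit.

	#include <stddef.h>
	#include <stdio.h>

	/* Recover the enclosing object from a pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work { int pending; };

	struct io {
		int error;
		struct work work;	/* embedded, like dm_crypt_io::work */
	};

	int main(void)
	{
		struct io io = { .error = -5 };
		struct work *w = &io.work;	/* what the workqueue hands back */

		/* Walks back from the member to the containing struct io. */
		printf("%d\n", container_of(w, struct io, work)->error); /* -5 */
		return 0;
	}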
@@ -690,7 +716,7 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
 	cc->key_size = key_size; /* initial settings */
 
 	if ((!key_size && strcmp(key, "-")) ||
-	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
 		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
@@ -746,7 +772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (crypt_set_key(cc, argv[1])) {
 		ti->error = "Error decoding key";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	/* Compatiblity mode for old dm-crypt cipher strings */
@@ -757,19 +783,19 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (strcmp(chainmode, "ecb") && !ivmode) {
 		ti->error = "This chaining mode requires an IV mechanism";
-		goto bad1;
+		goto bad_cipher;
 	}
 
-	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
-		     cipher) >= CRYPTO_MAX_ALG_NAME) {
+	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
 		ti->error = "Chain mode + cipher name is too long";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm)) {
 		ti->error = "Error allocating crypto tfm";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	strcpy(cc->cipher, cipher);
@@ -793,18 +819,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else {
 		ti->error = "Invalid IV mode";
-		goto bad2;
+		goto bad_ivmode;
 	}
 
 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
-		goto bad2;
+		goto bad_ivmode;
 
 	cc->iv_size = crypto_blkcipher_ivsize(tfm);
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
-			          (unsigned int)(sizeof(u64) / sizeof(u8)));
+				  (unsigned int)(sizeof(u64) / sizeof(u8)));
 	else {
 		if (cc->iv_gen_ops) {
 			DMWARN("Selected cipher does not support IVs");
@@ -817,13 +843,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = "Cannot allocate crypt io mempool";
-		goto bad3;
+		goto bad_slab_pool;
 	}
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
-		goto bad4;
+		goto bad_page_pool;
 	}
 
 	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
@@ -834,25 +860,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = "Error setting key";
-		goto bad5;
+		goto bad_device;
 	}
 
 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid iv_offset sector";
-		goto bad5;
+		goto bad_device;
 	}
 	cc->iv_offset = tmpll;
 
 	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid device sector";
-		goto bad5;
+		goto bad_device;
 	}
 	cc->start = tmpll;
 
 	if (dm_get_device(ti, argv[3], cc->start, ti->len,
-	                  dm_table_get_mode(ti->table), &cc->dev)) {
+			  dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "Device lookup failed";
-		goto bad5;
+		goto bad_device;
 	}
 
 	if (ivmode && cc->iv_gen_ops) {
@@ -861,27 +887,45 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
 		if (!cc->iv_mode) {
 			ti->error = "Error kmallocing iv_mode string";
-			goto bad5;
+			goto bad_ivmode_string;
 		}
 		strcpy(cc->iv_mode, ivmode);
 	} else
 		cc->iv_mode = NULL;
 
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	if (!cc->io_queue) {
+		ti->error = "Couldn't create kcryptd io queue";
+		goto bad_io_queue;
+	}
+
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	if (!cc->crypt_queue) {
+		ti->error = "Couldn't create kcryptd queue";
+		goto bad_crypt_queue;
+	}
+
 	ti->private = cc;
 	return 0;
 
-bad5:
+bad_crypt_queue:
+	destroy_workqueue(cc->io_queue);
+bad_io_queue:
+	kfree(cc->iv_mode);
+bad_ivmode_string:
+	dm_put_device(ti, cc->dev);
+bad_device:
 	bioset_free(cc->bs);
 bad_bs:
 	mempool_destroy(cc->page_pool);
-bad4:
+bad_page_pool:
 	mempool_destroy(cc->io_pool);
-bad3:
+bad_slab_pool:
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
-bad2:
+bad_ivmode:
 	crypto_free_blkcipher(tfm);
-bad1:
+bad_cipher:
 	/* Must zero key material before freeing */
 	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
 	kfree(cc);
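Note how the constructor's unwind ladder switches from numbered labels (bad1 ... bad5) to labels named after the resource whose allocation failed, so inserting a new step (like the two workqueues here) no longer forces renumbering every later label. The general idiom, as a sketch with hypothetical alloc_pool()/free_pool() helpers and an example struct:

	static int example_ctr(struct example *e)
	{
		e->pool = alloc_pool();			/* hypothetical helper */
		if (!e->pool)
			goto bad_pool;

		e->wq = create_singlethread_workqueue("example");
		if (!e->wq)
			goto bad_queue;

		return 0;

	bad_queue:
		free_pool(e->pool);	/* unwind in reverse order of acquisition */
	bad_pool:
		return -ENOMEM;
	}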
@@ -892,7 +936,8 @@ static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
-	flush_workqueue(_kcryptd_workqueue);
+	destroy_workqueue(cc->io_queue);
+	destroy_workqueue(cc->crypt_queue);
 
 	bioset_free(cc->bs);
 	mempool_destroy(cc->page_pool);
@@ -918,9 +963,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->error = io->post_process = 0;
+	io->error = 0;
 	atomic_set(&io->pending, 0);
-	kcryptd_queue_io(io);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_queue_io(io);
+	else
+		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -1037,25 +1086,12 @@ static int __init dm_crypt_init(void)
 	if (!_crypt_io_pool)
 		return -ENOMEM;
 
-	_kcryptd_workqueue = create_workqueue("kcryptd");
-	if (!_kcryptd_workqueue) {
-		r = -ENOMEM;
-		DMERR("couldn't create kcryptd");
-		goto bad1;
-	}
-
 	r = dm_register_target(&crypt_target);
 	if (r < 0) {
 		DMERR("register failed %d", r);
-		goto bad2;
+		kmem_cache_destroy(_crypt_io_pool);
 	}
 
-	return 0;
-
-bad2:
-	destroy_workqueue(_kcryptd_workqueue);
-bad1:
-	kmem_cache_destroy(_crypt_io_pool);
 	return r;
 }
 
@@ -1066,7 +1102,6 @@ static void __exit dm_crypt_exit(void)
 	if (r < 0)
 		DMERR("unregister failed %d", r);
 
-	destroy_workqueue(_kcryptd_workqueue);
 	kmem_cache_destroy(_crypt_io_pool);
 }
 