Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 170
1 file changed, 102 insertions(+), 68 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4a5849b55197..0eb5416798bd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -36,7 +36,6 @@ struct dm_crypt_io {
 	struct work_struct work;
 	atomic_t pending;
 	int error;
-	int post_process;
 };
 
 /*
@@ -57,7 +56,7 @@ struct crypt_config;
 
 struct crypt_iv_operations {
 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
-	           const char *opts);
+		   const char *opts);
 	void (*dtr)(struct crypt_config *cc);
 	const char *(*status)(struct crypt_config *cc);
 	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
@@ -80,6 +79,8 @@ struct crypt_config {
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
+	struct workqueue_struct *io_queue;
+	struct workqueue_struct *crypt_queue;
 	/*
 	 * crypto related data
 	 */
@@ -137,7 +138,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-                              const char *opts)
+			      const char *opts)
 {
 	struct crypto_cipher *essiv_tfm;
 	struct crypto_hash *hash_tfm;
@@ -175,6 +176,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 	if (err) {
 		ti->error = "Error calculating hash in ESSIV";
+		kfree(salt);
 		return err;
 	}
 
180 182
@@ -188,7 +190,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	if (crypto_cipher_blocksize(essiv_tfm) !=
 	    crypto_blkcipher_ivsize(cc->tfm)) {
 		ti->error = "Block size of ESSIV cipher does "
-		            "not match IV size of block cipher";
+			    "not match IV size of block cipher";
 		crypto_free_cipher(essiv_tfm);
 		kfree(salt);
 		return -EINVAL;
@@ -319,10 +321,10 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
 	return r;
 }
 
-static void
-crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
-                   struct bio *bio_out, struct bio *bio_in,
-                   sector_t sector, int write)
+static void crypt_convert_init(struct crypt_config *cc,
+			       struct convert_context *ctx,
+			       struct bio *bio_out, struct bio *bio_in,
+			       sector_t sector, int write)
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
@@ -338,7 +340,7 @@ crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static int crypt_convert(struct crypt_config *cc,
-                         struct convert_context *ctx)
+			 struct convert_context *ctx)
 {
 	int r = 0;
 
@@ -370,7 +372,7 @@ static int crypt_convert(struct crypt_config *cc,
 		}
 
 		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
-		                              ctx->write, ctx->sector);
+					      ctx->write, ctx->sector);
 		if (r < 0)
 			break;
 
@@ -380,13 +382,13 @@ static int crypt_convert(struct crypt_config *cc,
 	return r;
 }
 
- static void dm_crypt_bio_destructor(struct bio *bio)
- {
-	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->target->private;
-
-	bio_free(bio, cc->bs);
- }
+static void dm_crypt_bio_destructor(struct bio *bio)
+{
+	struct dm_crypt_io *io = bio->bi_private;
+	struct crypt_config *cc = io->target->private;
+
+	bio_free(bio, cc->bs);
+}
 
 /*
  * Generate a new unfragmented bio with the given size
@@ -458,7 +460,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
  */
-static void dec_pending(struct dm_crypt_io *io, int error)
+static void crypt_dec_pending(struct dm_crypt_io *io, int error)
 {
 	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
@@ -474,18 +476,36 @@ static void dec_pending(struct dm_crypt_io *io, int error)
 }
 
 /*
- * kcryptd:
+ * kcryptd/kcryptd_io:
  *
  * Needed because it would be very unwise to do decryption in an
  * interrupt context.
+ *
+ * kcryptd performs the actual encryption or decryption.
+ *
+ * kcryptd_io performs the IO submission.
+ *
+ * They must be separated as otherwise the final stages could be
+ * starved by new requests which can block in the first stages due
+ * to memory allocation.
  */
-static struct workqueue_struct *_kcryptd_workqueue;
 static void kcryptd_do_work(struct work_struct *work);
+static void kcryptd_do_crypt(struct work_struct *work);
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
+	struct crypt_config *cc = io->target->private;
+
 	INIT_WORK(&io->work, kcryptd_do_work);
-	queue_work(_kcryptd_workqueue, &io->work);
+	queue_work(cc->io_queue, &io->work);
+}
+
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_do_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 static void crypt_endio(struct bio *clone, int error)
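
A minimal, self-contained sketch of the per-device two-queue pattern introduced above, with hypothetical names (my_dev, my_req, my_io_fn, my_crypt_fn) standing in for the patch's own symbols:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct my_dev {
	struct workqueue_struct *io_wq;		/* stage 1: I/O submission */
	struct workqueue_struct *crypt_wq;	/* stage 2: en/decryption */
};

struct my_req {
	struct work_struct work;	/* embedded, so container_of() works */
	struct my_dev *dev;
};

static void my_crypt_fn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);
	/* stage 2 has its own thread, so it cannot be starved by new
	 * stage 1 requests blocking in memory allocation */
}

static void my_io_fn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* ... submit the I/O; afterwards hand the request to stage 2 ... */
	INIT_WORK(&req->work, my_crypt_fn);
	queue_work(req->dev->crypt_wq, &req->work);
}

static int my_dev_init(struct my_dev *dev)
{
	dev->io_wq = create_singlethread_workqueue("my_io");
	if (!dev->io_wq)
		return -ENOMEM;

	dev->crypt_wq = create_singlethread_workqueue("my_crypt");
	if (!dev->crypt_wq) {
		destroy_workqueue(dev->io_wq);
		return -ENOMEM;
	}
	return 0;
}

Embedding the work_struct in the request lets each handler recover its request with container_of(), and a singlethread queue keeps the requests of one stage strictly ordered.
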
@@ -508,13 +528,12 @@ static void crypt_endio(struct bio *clone, int error)
 	}
 
 	bio_put(clone);
-	io->post_process = 1;
-	kcryptd_queue_io(io);
+	kcryptd_queue_crypt(io);
 	return;
 
 out:
 	bio_put(clone);
-	dec_pending(io, error);
+	crypt_dec_pending(io, error);
 }
 
 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
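
The hand-off in crypt_endio is the standard way to move work out of a bio completion handler, which may run in interrupt context where crypto (and anything else that can sleep) is off-limits. A sketch, reusing the hypothetical names from the example above and the same era's endio signature as this patch:

static void my_endio(struct bio *clone, int error)
{
	struct my_req *req = clone->bi_private;

	bio_put(clone);

	/* no crypto here: defer it to process context instead */
	INIT_WORK(&req->work, my_crypt_fn);
	queue_work(req->dev->crypt_wq, &req->work);
}
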
@@ -544,7 +563,7 @@ static void process_read(struct dm_crypt_io *io)
 	 */
 	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
 	if (unlikely(!clone)) {
-		dec_pending(io, -ENOMEM);
+		crypt_dec_pending(io, -ENOMEM);
 		return;
 	}
 
@@ -579,7 +598,7 @@ static void process_write(struct dm_crypt_io *io)
 	while (remaining) {
 		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
-			dec_pending(io, -ENOMEM);
+			crypt_dec_pending(io, -ENOMEM);
 			return;
 		}
 
@@ -589,7 +608,7 @@ static void process_write(struct dm_crypt_io *io)
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
-			dec_pending(io, -EIO);
+			crypt_dec_pending(io, -EIO);
 			return;
 		}
 
@@ -624,17 +643,23 @@ static void process_read_endio(struct dm_crypt_io *io)
 	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
 			   io->base_bio->bi_sector - io->target->begin, 0);
 
-	dec_pending(io, crypt_convert(cc, &ctx));
+	crypt_dec_pending(io, crypt_convert(cc, &ctx));
 }
 
 static void kcryptd_do_work(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (io->post_process)
-		process_read_endio(io);
-	else if (bio_data_dir(io->base_bio) == READ)
+	if (bio_data_dir(io->base_bio) == READ)
 		process_read(io);
+}
+
+static void kcryptd_do_crypt(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		process_read_endio(io);
+	else
+		process_write(io);
+}
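
With the work function split in place, each request direction takes the following path through the two queues (summarized from the code above):

/*
 * READ:  crypt_map -> io_queue:    kcryptd_do_work  -> process_read
 *                                  (submits the clone bio)
 *        crypt_endio -> crypt_queue: kcryptd_do_crypt -> process_read_endio
 *                                  (decrypts the completed read)
 * WRITE: crypt_map -> crypt_queue: kcryptd_do_crypt -> process_write
 *                                  (encrypts, then submits the clones)
 */
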
@@ -690,7 +715,7 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
 	cc->key_size = key_size; /* initial settings */
 
 	if ((!key_size && strcmp(key, "-")) ||
-	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
 		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
@@ -746,7 +771,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (crypt_set_key(cc, argv[1])) {
 		ti->error = "Error decoding key";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	/* Compatiblity mode for old dm-crypt cipher strings */
@@ -757,19 +782,19 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (strcmp(chainmode, "ecb") && !ivmode) {
 		ti->error = "This chaining mode requires an IV mechanism";
-		goto bad1;
+		goto bad_cipher;
 	}
 
-	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
-		     cipher) >= CRYPTO_MAX_ALG_NAME) {
+	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
 		ti->error = "Chain mode + cipher name is too long";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm)) {
 		ti->error = "Error allocating crypto tfm";
-		goto bad1;
+		goto bad_cipher;
 	}
 
 	strcpy(cc->cipher, cipher);
@@ -793,18 +818,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else {
 		ti->error = "Invalid IV mode";
-		goto bad2;
+		goto bad_ivmode;
 	}
 
 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
-		goto bad2;
+		goto bad_ivmode;
 
 	cc->iv_size = crypto_blkcipher_ivsize(tfm);
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
 				  (unsigned int)(sizeof(u64) / sizeof(u8)));
 	else {
 		if (cc->iv_gen_ops) {
 			DMWARN("Selected cipher does not support IVs");
@@ -817,13 +842,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = "Cannot allocate crypt io mempool";
-		goto bad3;
+		goto bad_slab_pool;
 	}
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
-		goto bad4;
+		goto bad_page_pool;
 	}
 
 	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
@@ -834,25 +859,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = "Error setting key";
-		goto bad5;
+		goto bad_device;
 	}
 
 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid iv_offset sector";
-		goto bad5;
+		goto bad_device;
 	}
 	cc->iv_offset = tmpll;
 
 	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid device sector";
-		goto bad5;
+		goto bad_device;
 	}
 	cc->start = tmpll;
 
 	if (dm_get_device(ti, argv[3], cc->start, ti->len,
 			  dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "Device lookup failed";
-		goto bad5;
+		goto bad_device;
 	}
 
 	if (ivmode && cc->iv_gen_ops) {
@@ -861,27 +886,45 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
 		if (!cc->iv_mode) {
 			ti->error = "Error kmallocing iv_mode string";
-			goto bad5;
+			goto bad_ivmode_string;
 		}
 		strcpy(cc->iv_mode, ivmode);
 	} else
 		cc->iv_mode = NULL;
 
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	if (!cc->io_queue) {
+		ti->error = "Couldn't create kcryptd io queue";
+		goto bad_io_queue;
+	}
+
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	if (!cc->crypt_queue) {
+		ti->error = "Couldn't create kcryptd queue";
+		goto bad_crypt_queue;
+	}
+
 	ti->private = cc;
 	return 0;
 
-bad5:
+bad_crypt_queue:
+	destroy_workqueue(cc->io_queue);
+bad_io_queue:
+	kfree(cc->iv_mode);
+bad_ivmode_string:
+	dm_put_device(ti, cc->dev);
+bad_device:
 	bioset_free(cc->bs);
 bad_bs:
 	mempool_destroy(cc->page_pool);
-bad4:
+bad_page_pool:
 	mempool_destroy(cc->io_pool);
-bad3:
+bad_slab_pool:
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
-bad2:
+bad_ivmode:
 	crypto_free_blkcipher(tfm);
-bad1:
+bad_cipher:
 	/* Must zero key material before freeing */
 	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
 	kfree(cc);
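
The renamed labels follow the usual kernel unwind idiom: each label names the step that failed, and the ladder below it releases, in reverse order, everything acquired before that step. A minimal sketch with hypothetical resources a, b and c:

/* trivial stand-ins; a real constructor would allocate actual resources */
static int alloc_a(void) { return 0; }
static int alloc_b(void) { return 0; }
static int alloc_c(void) { return 0; }
static void free_a(void) { }
static void free_b(void) { }

static int my_ctr(void)
{
	if (alloc_a())
		goto bad_a;
	if (alloc_b())
		goto bad_b;
	if (alloc_c())
		goto bad_c;
	return 0;

bad_c:			/* c failed: undo b, then fall through to undo a */
	free_b();
bad_b:			/* b failed: undo a */
	free_a();
bad_a:
	return -ENOMEM;
}
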
@@ -892,7 +935,8 @@ static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
-	flush_workqueue(_kcryptd_workqueue);
+	destroy_workqueue(cc->io_queue);
+	destroy_workqueue(cc->crypt_queue);
 
 	bioset_free(cc->bs);
 	mempool_destroy(cc->page_pool);
@@ -918,9 +962,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->error = io->post_process = 0;
+	io->error = 0;
 	atomic_set(&io->pending, 0);
-	kcryptd_queue_io(io);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_queue_io(io);
+	else
+		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -1037,25 +1085,12 @@ static int __init dm_crypt_init(void)
 	if (!_crypt_io_pool)
 		return -ENOMEM;
 
-	_kcryptd_workqueue = create_workqueue("kcryptd");
-	if (!_kcryptd_workqueue) {
-		r = -ENOMEM;
-		DMERR("couldn't create kcryptd");
-		goto bad1;
-	}
-
 	r = dm_register_target(&crypt_target);
 	if (r < 0) {
 		DMERR("register failed %d", r);
-		goto bad2;
+		kmem_cache_destroy(_crypt_io_pool);
 	}
 
-	return 0;
-
-bad2:
-	destroy_workqueue(_kcryptd_workqueue);
-bad1:
-	kmem_cache_destroy(_crypt_io_pool);
 	return r;
 }
 
@@ -1066,7 +1101,6 @@ static void __exit dm_crypt_exit(void)
 	if (r < 0)
 		DMERR("unregister failed %d", r);
 
-	destroy_workqueue(_kcryptd_workqueue);
 	kmem_cache_destroy(_crypt_io_pool);
 }
 