Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  56
1 file changed, 45 insertions, 11 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 682ef9e6acd3..ce26c84af064 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
 #include <asm/page.h>
 #include <asm/unaligned.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "crypt"
 #define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
 	atomic_t pending;
 	int error;
 	sector_t sector;
+	struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
 
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
-	wait_queue_head_t writeq;
 
 	/*
 	 * crypto related data
@@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
+	io->base_io = NULL;
 	atomic_set(&io->pending, 0);
 
 	return io;
@@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
@@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	bio_endio(io->base_bio, io->error);
+	if (likely(!io->base_io))
+		bio_endio(io->base_bio, io->error);
+	else {
+		if (io->error && !io->base_io->error)
+			io->base_io->error = io->error;
+		crypt_dec_pending(io->base_io);
+	}
+
 	mempool_free(io, cc->io_pool);
 }
 
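This hunk turns the per-request reference count into a chain: each fragment's dm_crypt_io holds one reference on the io of the first fragment (base_io), any error is propagated to that base, and only the base ends the original bio. A self-contained sketch of the pattern follows; it is a userspace model with simplified types (a plain int instead of atomic_t, printf/free standing in for bio_endio/mempool_free), not the real kernel API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified model of the completion chain introduced above. */
    struct io {
        int pending;                /* atomic_t in the kernel      */
        int error;
        unsigned long long sector;  /* sector_t in the kernel      */
        struct io *base_io;         /* NULL for the first fragment */
    };

    static void dec_pending(struct io *io)
    {
        if (--io->pending)          /* atomic_dec_and_test() */
            return;

        if (!io->base_io) {
            /* first fragment: completes the original bio */
            printf("bio_endio(error=%d)\n", io->error);
        } else {
            /* keep the first error seen, then drop our
             * reference on the base io */
            if (io->error && !io->base_io->error)
                io->base_io->error = io->error;
            dec_pending(io->base_io);
        }
        free(io);                   /* mempool_free() in the kernel */
    }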
@@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
-
 	generic_make_request(clone);
-	wake_up(&cc->writeq);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
 
 	clone->bi_sector = cc->start + io->sector;
-	io->sector += bio_sectors(clone);
 
 	if (async)
 		kcryptd_queue_io(io);
@@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	struct dm_crypt_io *new_io;
 	int crypt_finished;
 	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
+	sector_t sector = io->sector;
 	int r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
@@ -726,6 +733,7 @@
 		io->ctx.idx_out = 0;
 
 		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
@@ -741,6 +749,8 @@
 		 */
 		if (unlikely(r < 0))
 			break;
+
+		io->sector = sector;
 	}
 
 	/*
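Note how the loop now advances a local sector cursor and copies it into io->sector only after crypt_convert() succeeds, so a failed fragment never moves the io past data that was actually converted. The same pattern in the simplified model, with convert_fragment() as a hypothetical stand-in for the conversion step:

    /* hypothetical stand-in for crypt_convert(); always succeeds here */
    static int convert_fragment(struct io *io, unsigned bytes)
    {
        (void)io; (void)bytes;
        return 0;
    }

    static void write_convert_loop(struct io *io, unsigned remaining,
                                   unsigned frag_bytes)
    {
        unsigned long long sector = io->sector;     /* local cursor */

        while (remaining) {
            unsigned n = remaining < frag_bytes ? remaining : frag_bytes;

            remaining -= n;
            sector += n >> 9;           /* bytes -> 512-byte sectors */

            if (convert_fragment(io, n) < 0)
                break;                  /* io keeps its last good position */

            io->sector = sector;        /* commit only on success */
        }
    }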
@@ -750,8 +760,33 @@
 		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
 
-		if (unlikely(remaining))
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * With async crypto it is unsafe to share the crypto context
+		 * between fragments, so switch to a new dm_crypt_io structure.
+		 */
+		if (unlikely(!crypt_finished && remaining)) {
+			new_io = crypt_io_alloc(io->target, io->base_bio,
+						sector);
+			crypt_inc_pending(new_io);
+			crypt_convert_init(cc, &new_io->ctx, NULL,
+					   io->base_bio, sector);
+			new_io->ctx.idx_in = io->ctx.idx_in;
+			new_io->ctx.offset_in = io->ctx.offset_in;
+
+			/*
+			 * Fragments after the first use the base_io
+			 * pending count.
+			 */
+			if (!io->base_io)
+				new_io->base_io = io;
+			else {
+				new_io->base_io = io->base_io;
+				crypt_inc_pending(io->base_io);
+				crypt_dec_pending(io);
+			}
+
+			io = new_io;
+		}
 	}
 
 	crypt_dec_pending(io);
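This hunk is where the chain is built: the io of the first fragment becomes base_io for every later one, and each later fragment pins the shared base before the fragment it replaces drops its own reference, so the pending count cannot reach zero while conversion is still in flight. Continuing the simplified model (calloc stands in for the mempool allocation; error handling omitted):

    /* Switch conversion to a fresh io for the next fragment. */
    static struct io *switch_to_new_io(struct io *io)
    {
        struct io *new_io = calloc(1, sizeof(*new_io));

        new_io->pending = 1;            /* reference held by the caller */

        if (!io->base_io) {
            /* first split: the original io becomes the base */
            new_io->base_io = io;
        } else {
            /* later splits: pin the shared base before the
             * previous fragment releases itself */
            new_io->base_io = io->base_io;
            io->base_io->pending++;     /* crypt_inc_pending(base_io) */
            dec_pending(io);
        }
        return new_io;
    }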
@@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_crypt_queue;
 	}
 
-	init_waitqueue_head(&cc->writeq);
 	ti->private = cc;
 	return 0;
 