author | Milan Broz <mbroz@redhat.com> | 2008-10-21 12:45:02 -0400 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2008-10-21 12:45:02 -0400 |
commit | 393b47ef23bbcf16890c907d0144b5a8ec4caebf | |
tree | 8f3f26715fe81ef5fb78a6f30a46e163873de6b9 /drivers/md/dm-crypt.c | |
parent | b635b00e0e159d858486fd899c4021d1d67757e2 | |
dm crypt: fix async split
When writing I/O, dm-crypt has to allocate a new cloned bio
and encrypt the data into newly-allocated pages attached to this bio.
In rare cases, because of hardware restrictions (e.g. a physical segment
limit) or memory pressure, more than one cloned bio has to be used,
each processing a different fragment of the original.
Currently a single waitqueue is used: it waits for one fragment to finish
and then continues with the next fragment.
But when using asynchronous crypto this doesn't work, because several
fragments may be processed asynchronously or in parallel, yet there is
only one crypt context, which cannot be shared between the bio fragments.
The result may be corruption of the data contained in the encrypted bio.
The patch fixes this by allocating new dm_crypt_io structs (with new
crypto contexts) and running them independently.
The fragments contain a pointer to the base dm_crypt_io struct for
reference counting, so the base one is properly deallocated
after all the fragments have finished.
In a low memory situation, this uses only one additional object from the
mempool. If the mempool is empty, the next allocation simply waits for
previous fragments to complete.
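
For orientation, crypt_io_alloc() after this patch looks roughly like the sketch below; it is reconstructed from the hunk context further down rather than copied verbatim, so treat the exact wording as an approximation. The mempool_alloc(GFP_NOIO) call is why an empty pool does not cause a failure: with a blocking gfp mask the allocation sleeps until a previously allocated dm_crypt_io is returned via mempool_free().

```c
/* Approximate shape of crypt_io_alloc() after this patch; reconstructed
 * from the diff context below, not copied verbatim from dm-crypt.c. */
static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	/* Blocking allocation: if the pool is empty, sleep until another
	 * in-flight dm_crypt_io is freed back via mempool_free(). */
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;	/* set later for additional fragments */
	atomic_set(&io->pending, 0);

	return io;
}
```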
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r-- | drivers/md/dm-crypt.c | 41 |
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ad98ded3008c..046ee516074b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -56,6 +56,7 @@ struct dm_crypt_io {
 	atomic_t pending;
 	int error;
 	sector_t sector;
+	struct dm_crypt_io *base_io;
 };

 struct dm_crypt_request {
@@ -534,6 +535,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
+	io->base_io = NULL;
 	atomic_set(&io->pending, 0);

 	return io;
@@ -547,6 +549,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
@@ -555,7 +558,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->pending))
 		return;

-	bio_endio(io->base_bio, io->error);
+	if (likely(!io->base_io))
+		bio_endio(io->base_bio, io->error);
+	else {
+		if (io->error && !io->base_io->error)
+			io->base_io->error = io->error;
+		crypt_dec_pending(io->base_io);
+	}
+
 	mempool_free(io, cc->io_pool);
 }

@@ -699,6 +709,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	struct dm_crypt_io *new_io;
 	int crypt_finished;
 	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
@@ -753,6 +764,34 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);

+		/*
+		 * With async crypto it is unsafe to share the crypto context
+		 * between fragments, so switch to a new dm_crypt_io structure.
+		 */
+		if (unlikely(!crypt_finished && remaining)) {
+			new_io = crypt_io_alloc(io->target, io->base_bio,
+						sector);
+			crypt_inc_pending(new_io);
+			crypt_convert_init(cc, &new_io->ctx, NULL,
+					   io->base_bio, sector);
+			new_io->ctx.idx_in = io->ctx.idx_in;
+			new_io->ctx.offset_in = io->ctx.offset_in;
+
+			/*
+			 * Fragments after the first use the base_io
+			 * pending count.
+			 */
+			if (!io->base_io)
+				new_io->base_io = io;
+			else {
+				new_io->base_io = io->base_io;
+				crypt_inc_pending(io->base_io);
+				crypt_dec_pending(io);
+			}
+
+			io = new_io;
+		}
+
 		if (unlikely(remaining))
 			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}