author	Milan Broz <mbroz@redhat.com>	2008-02-07 21:11:12 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2008-02-07 21:11:12 -0500
commit	95497a960015c89c7c585d5fb953bc2816dba1e5 (patch)
tree	3e1f33a86b81bf371517fc5b60db295989355a4d
parent	43d6903482eec168b727bc4bf76a9f415257d862 (diff)
dm crypt: prepare async callback fn
dm-crypt: Use crypto ablkcipher interface
Prepare callback function for async crypto operation.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
-rw-r--r--	drivers/md/dm-crypt.c	45
1 file changed, 41 insertions(+), 4 deletions(-)
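For orientation before the diff: unlike the old synchronous blkcipher calls, an ablkcipher request carries a completion callback and may return -EINPROGRESS (the result will arrive via the callback) or -EBUSY (the request was backlogged; the callback first fires with -EINPROGRESS once processing actually starts). The standalone sketch below illustrates that calling convention; it is not code from this patch, the demo_* names are invented, and only the crypto API calls are the real (since-removed) ablkcipher interface:

/* Illustrative sketch of the async ablkcipher calling convention
 * (hypothetical demo_* names; not code from this patch). */
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct demo_ctx {
	struct completion restart;	/* woken when a backlogged request starts */
};

/* Completion callback: -EINPROGRESS here means "backlogged request has
 * now started", not "finished"; any other value is the final status. */
static void demo_done(struct crypto_async_request *async_req, int error)
{
	struct demo_ctx *ctx = async_req->data;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}
	/* error now holds the request's final status (0 on success) */
}

static int demo_encrypt(struct crypto_ablkcipher *tfm,
			struct ablkcipher_request *req,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int len, u8 *iv, struct demo_ctx *ctx)
{
	int r;

	init_completion(&ctx->restart);
	ablkcipher_request_set_tfm(req, tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					demo_done, ctx);
	ablkcipher_request_set_crypt(req, src, dst, len, iv);

	r = crypto_ablkcipher_encrypt(req);
	if (r == -EBUSY) {
		/* backlogged: wait for the -EINPROGRESS notification */
		wait_for_completion(&ctx->restart);
		r = -EINPROGRESS;
	}
	return r;	/* 0 (done inline), -EINPROGRESS, or a hard error */
}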
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2ea3eb99c91f..c45bd0e59dcc 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -406,11 +406,17 @@ static int crypt_convert_block(struct crypt_config *cc,
 			     ctx->sector);
 }
 
+static void kcryptd_async_done(struct crypto_async_request *async_req,
+			       int error);
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
 	if (!cc->req)
 		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	ablkcipher_request_set_tfm(cc->req, cc->tfm);
+	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+					CRYPTO_TFM_REQ_MAY_SLEEP,
+					kcryptd_async_done, ctx);
 }
 
 /*
@@ -615,6 +621,9 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
+	struct bio *clone = io->ctx.bio_out;
+
+	generic_make_request(clone);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -635,7 +644,8 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
 	queue_work(cc->io_queue, &io->work);
 }
 
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
+					  int error, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
 	struct crypt_config *cc = io->target->private;
@@ -653,8 +663,12 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
 	clone->bi_sector = cc->start + io->sector;
 	io->sector += bio_sectors(clone);
 
-	atomic_inc(&io->pending);
-	generic_make_request(clone);
+	if (async)
+		kcryptd_queue_io(io);
+	else {
+		atomic_inc(&io->pending);
+		generic_make_request(clone);
+	}
 }
 
 static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
@@ -682,7 +696,7 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
 	r = crypt_convert(cc, &io->ctx);
 
-	kcryptd_crypt_write_io_submit(io, r);
+	kcryptd_crypt_write_io_submit(io, r, 0);
 	if (unlikely(r < 0))
 		return;
 
@@ -728,6 +742,29 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	kcryptd_crypt_read_done(io, r);
 }
 
+static void kcryptd_async_done(struct crypto_async_request *async_req,
+			       int error)
+{
+	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+	struct crypt_config *cc = io->target->private;
+
+	if (error == -EINPROGRESS) {
+		complete(&ctx->restart);
+		return;
+	}
+
+	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+
+	if (!atomic_dec_and_test(&ctx->pending))
+		return;
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_crypt_read_done(io, error);
+	else
+		kcryptd_crypt_write_io_submit(io, error, 1);
+}
+
 static void kcryptd_crypt(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
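A note on kcryptd_async_done above: an error of -EINPROGRESS is not a completion at all, but the crypto layer's signal that a previously backlogged request has begun processing, so the callback only wakes the submitter sleeping on ctx->restart. On genuine completion the request goes back to the mempool, and the context's pending count gates the finish: only the last block to complete moves the I/O forward, finishing reads via kcryptd_crypt_read_done() and submitting writes with async = 1, which routes the bio through kcryptd_queue_io() rather than calling generic_make_request() directly, presumably because the callback may run in softirq context where the submission path must not block.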