author     Milan Broz <mbroz@redhat.com>          2006-10-03 04:15:38 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-10-03 11:04:15 -0400
commit     93e605c237a61f5a0ea37b12353392f01d596628 (patch)
tree       c3e9932a19b95c9a4e4fb41527dc4c3c40c24057 /drivers/md
parent     8b004457168995f2ae2a35327f885183a9e74141 (diff)
[PATCH] dm crypt: restructure write processing
Restructure the dm-crypt write processing in preparation for workqueue changes
in the next patches.
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c | 152
1 file changed, 76 insertions(+), 76 deletions(-)
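Before reading the diff: the patch splits the read and write handling out of crypt_map() into two new helpers, process_read() and process_write(), which now allocate and submit their own clones and take their own references on io->pending. A condensed sketch of the patched crypt_map(), assembled from the diff below (the trailing map_info parameter is truncated in the hunk header, so its name here is a guess):

```c
/* Condensed from the patched crypt_map() in the diff below;
 * the control-flow shape only, not a drop-in replacement. */
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);

	io->target = ti;
	io->base_bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 0);	/* helpers take their own references */

	if (bio_data_dir(bio) == WRITE)
		return process_write(io);	/* clone, encrypt, submit chunk by chunk */

	return process_read(io);		/* clone and submit; decrypt at endio */
}
```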
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3783cf978850..946a9ebc89db 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -504,12 +504,14 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_rw = io->base_bio->bi_rw;
 }
 
-static struct bio *clone_read(struct crypt_io *io,
-			      sector_t sector)
+static int process_read(struct crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+
+	atomic_inc(&io->pending);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -517,47 +519,94 @@ static struct bio *clone_read(struct crypt_io *io,
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
 	clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
-	if (unlikely(!clone))
-		return NULL;
+	if (unlikely(!clone)) {
+		dec_pending(io, -ENOMEM);
+		return 0;
+	}
 
 	clone_init(io, clone);
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
 	clone->bi_size = base_bio->bi_size;
+	clone->bi_sector = cc->start + sector;
 	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
-	clone->bi_sector = cc->start + sector;
 
-	return clone;
+	generic_make_request(clone);
+
+	return 0;
 }
 
-static struct bio *clone_write(struct crypt_io *io,
-			       sector_t sector,
-			       unsigned *bvec_idx,
-			       struct convert_context *ctx)
+static int process_write(struct crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
+	struct convert_context ctx;
+	unsigned remaining = base_bio->bi_size;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+	unsigned bvec_idx = 0;
 
-	clone = crypt_alloc_buffer(cc, base_bio->bi_size,
-				   io->first_clone, bvec_idx);
-	if (!clone)
-		return NULL;
+	atomic_inc(&io->pending);
 
-	ctx->bio_out = clone;
+	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
 
-	if (unlikely(crypt_convert(cc, ctx) < 0)) {
-		crypt_free_buffer_pages(cc, clone,
-					clone->bi_size);
-		bio_put(clone);
-		return NULL;
+	/*
+	 * The allocated buffers can be smaller than the whole bio,
+	 * so repeat the whole process until all the data can be handled.
+	 */
+	while (remaining) {
+		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+					   io->first_clone, &bvec_idx);
+		if (unlikely(!clone))
+			goto cleanup;
+
+		ctx.bio_out = clone;
+
+		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+			crypt_free_buffer_pages(cc, clone, clone->bi_size);
+			bio_put(clone);
+			goto cleanup;
+		}
+
+		clone_init(io, clone);
+		clone->bi_sector = cc->start + sector;
+
+		if (!io->first_clone) {
+			/*
+			 * hold a reference to the first clone, because it
+			 * holds the bio_vec array and that can't be freed
+			 * before all other clones are released
+			 */
+			bio_get(clone);
+			io->first_clone = clone;
+		}
+
+		atomic_inc(&io->pending);
+
+		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
+
+		generic_make_request(clone);
+
+		/* out of memory -> run queues */
+		if (remaining)
+			blk_congestion_wait(bio_data_dir(clone), HZ/100);
 	}
 
-	clone_init(io, clone);
-	clone->bi_sector = cc->start + sector;
+	/* drop reference, clones could have returned before we reach this */
+	dec_pending(io, 0);
+	return 0;
 
-	return clone;
+cleanup:
+	if (io->first_clone) {
+		dec_pending(io, -ENOMEM);
+		return 0;
+	}
+
+	/* if no bio has been dispatched yet, we can directly return the error */
+	mempool_free(io, cc->io_pool);
+	return -ENOMEM;
 }
 
 static void process_read_endio(struct crypt_io *io)
@@ -838,68 +887,19 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 {
 	struct crypt_config *cc = ti->private;
 	struct crypt_io *io;
-	struct convert_context ctx;
-	struct bio *clone;
-	unsigned int remaining = bio->bi_size;
-	sector_t sector = bio->bi_sector - ti->begin;
-	unsigned int bvec_idx = 0;
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+
 	io->target = ti;
 	io->base_bio = bio;
 	io->first_clone = NULL;
 	io->error = 0;
-	atomic_set(&io->pending, 1); /* hold a reference */
+	atomic_set(&io->pending, 0);
 
 	if (bio_data_dir(bio) == WRITE)
-		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
-
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		if (bio_data_dir(bio) == WRITE)
-			clone = clone_write(io, sector, &bvec_idx, &ctx);
-		else
-			clone = clone_read(io, sector);
-		if (!clone)
-			goto cleanup;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
-		atomic_inc(&io->pending);
-
-		remaining -= clone->bi_size;
-		sector += bio_sectors(clone);
-
-		generic_make_request(clone);
-
-		/* out of memory -> run queues */
-		if (remaining)
-			blk_congestion_wait(bio_data_dir(clone), HZ/100);
-	}
+		return process_write(io);
 
-	/* drop reference, clones could have returned before we reach this */
-	dec_pending(io, 0);
-	return 0;
-
-cleanup:
-	if (io->first_clone) {
-		dec_pending(io, -ENOMEM);
-		return 0;
-	}
-
-	/* if no bio has been dispatched yet, we can directly return the error */
-	mempool_free(io, cc->io_pool);
-	return -ENOMEM;
+	return process_read(io);
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
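A note on the accounting this patch changes: crypt_map() used to initialise io->pending to 1 and hold that reference across the cloning loop; it now starts the counter at 0, and each helper takes (and, on the write path, later drops) its own reference around the clones it dispatches. Below is a minimal userspace analogue of that lifecycle using C11 atomics; the names mirror the patch, but the program itself, the sequential stand-in for the clone endio, and the printf are invented for illustration.

```c
#include <stdatomic.h>
#include <stdio.h>

struct io {
	atomic_int pending;
	int error;
};

/* Mirrors dec_pending(): the last dropped reference completes the base bio. */
static void dec_pending(struct io *io, int error)
{
	if (error)
		io->error = error;
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		printf("base bio completed, error=%d\n", io->error);
}

static void process_write(struct io *io, int nr_clones)
{
	atomic_fetch_add(&io->pending, 1);		/* helper's own reference */

	for (int i = 0; i < nr_clones; i++) {
		atomic_fetch_add(&io->pending, 1);	/* one reference per clone */
		/* generic_make_request(clone) would go here; the clone's
		 * endio eventually calls dec_pending(), done inline here. */
		dec_pending(io, 0);
	}

	dec_pending(io, 0);	/* drop our reference; io may complete here */
}

int main(void)
{
	struct io io;

	atomic_init(&io.pending, 0);	/* crypt_map() now starts at 0 */
	io.error = 0;

	process_write(&io, 3);
	return 0;
}
```

The helper's own reference is what keeps io alive even if every clone returns before the loop finishes; that is the race the in-diff comment "clones could have returned before we reach this" refers to, and which the old code covered with its initial count of 1.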