diff options
author | Milan Broz <mbroz@redhat.com> | 2011-01-13 14:59:53 -0500 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2011-01-13 14:59:53 -0500 |
commit | 20c82538e4f5ede51bc2b4795bc6e5cae772796d (patch) | |
tree | a70248a06b21bf249382d6f6beac4b8681bf8f35 /drivers/md | |
parent | c029772125594e31eb1a5ad9e0913724ed9891f2 (diff) |
dm crypt: use io thread for reads only if mempool exhausted
If there is enough memory, the code can submit the bio directly
instead of queueing this operation in a separate thread.
Try to allocate the bio clone with GFP_NOWAIT, and only if that
fails use the separate queue (the map function cannot block here).
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-crypt.c | 37 |
1 files changed, 23 insertions, 14 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 50ae6ef83738..dc4403bcc6a0 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -787,26 +787,30 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) | |||
787 | clone->bi_destructor = dm_crypt_bio_destructor; | 787 | clone->bi_destructor = dm_crypt_bio_destructor; |
788 | } | 788 | } |
789 | 789 | ||
790 | static void kcryptd_io_read(struct dm_crypt_io *io) | 790 | static void kcryptd_unplug(struct crypt_config *cc) |
791 | { | ||
792 | blk_unplug(bdev_get_queue(cc->dev->bdev)); | ||
793 | } | ||
794 | |||
795 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | ||
791 | { | 796 | { |
792 | struct crypt_config *cc = io->target->private; | 797 | struct crypt_config *cc = io->target->private; |
793 | struct bio *base_bio = io->base_bio; | 798 | struct bio *base_bio = io->base_bio; |
794 | struct bio *clone; | 799 | struct bio *clone; |
795 | 800 | ||
796 | crypt_inc_pending(io); | ||
797 | |||
798 | /* | 801 | /* |
799 | * The block layer might modify the bvec array, so always | 802 | * The block layer might modify the bvec array, so always |
800 | * copy the required bvecs because we need the original | 803 | * copy the required bvecs because we need the original |
801 | * one in order to decrypt the whole bio data *afterwards*. | 804 | * one in order to decrypt the whole bio data *afterwards*. |
802 | */ | 805 | */ |
803 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); | 806 | clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); |
804 | if (unlikely(!clone)) { | 807 | if (!clone) { |
805 | io->error = -ENOMEM; | 808 | kcryptd_unplug(cc); |
806 | crypt_dec_pending(io); | 809 | return 1; |
807 | return; | ||
808 | } | 810 | } |
809 | 811 | ||
812 | crypt_inc_pending(io); | ||
813 | |||
810 | clone_init(io, clone); | 814 | clone_init(io, clone); |
811 | clone->bi_idx = 0; | 815 | clone->bi_idx = 0; |
812 | clone->bi_vcnt = bio_segments(base_bio); | 816 | clone->bi_vcnt = bio_segments(base_bio); |
@@ -816,6 +820,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io) | |||
816 | sizeof(struct bio_vec) * clone->bi_vcnt); | 820 | sizeof(struct bio_vec) * clone->bi_vcnt); |
817 | 821 | ||
818 | generic_make_request(clone); | 822 | generic_make_request(clone); |
823 | return 0; | ||
819 | } | 824 | } |
820 | 825 | ||
821 | static void kcryptd_io_write(struct dm_crypt_io *io) | 826 | static void kcryptd_io_write(struct dm_crypt_io *io) |
@@ -828,9 +833,12 @@ static void kcryptd_io(struct work_struct *work) | |||
828 | { | 833 | { |
829 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | 834 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
830 | 835 | ||
831 | if (bio_data_dir(io->base_bio) == READ) | 836 | if (bio_data_dir(io->base_bio) == READ) { |
832 | kcryptd_io_read(io); | 837 | crypt_inc_pending(io); |
833 | else | 838 | if (kcryptd_io_read(io, GFP_NOIO)) |
839 | io->error = -ENOMEM; | ||
840 | crypt_dec_pending(io); | ||
841 | } else | ||
834 | kcryptd_io_write(io); | 842 | kcryptd_io_write(io); |
835 | } | 843 | } |
836 | 844 | ||
@@ -1424,9 +1432,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, | |||
1424 | 1432 | ||
1425 | io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); | 1433 | io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); |
1426 | 1434 | ||
1427 | if (bio_data_dir(io->base_bio) == READ) | 1435 | if (bio_data_dir(io->base_bio) == READ) { |
1428 | kcryptd_queue_io(io); | 1436 | if (kcryptd_io_read(io, GFP_NOWAIT)) |
1429 | else | 1437 | kcryptd_queue_io(io); |
1438 | } else | ||
1430 | kcryptd_queue_crypt(io); | 1439 | kcryptd_queue_crypt(io); |
1431 | 1440 | ||
1432 | return DM_MAPIO_SUBMITTED; | 1441 | return DM_MAPIO_SUBMITTED; |