author     Kent Overstreet <kmo@daterainc.com>  2013-10-11 18:45:43 -0400
committer  Kent Overstreet <kmo@daterainc.com>  2013-11-24 01:33:51 -0500
commit     003b5c5719f159f4f4bf97511c4702a0638313dd
tree       1b3cac74e22ae5a87fdb6e3066f2d728913e6e0c
parent     458b76ed2f9517becb74dcc8eedd70d3068ea6e4
block: Convert drivers to immutable biovecs
Now that we've got a mechanism for immutable biovecs -
bi_iter.bi_bvec_done - we need to convert drivers to use primitives that
respect it instead of using the bvec array directly.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: NeilBrown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
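The idea behind the conversion, in miniature: a struct bvec_iter carries
(bi_size, bi_idx, bi_bvec_done), the current bio_vec is computed from it
by value, and advancing touches only the iterator, never the bvec array.
Below is a rough, self-contained userspace sketch of the two primitives
this patch switches dm-crypt to. The names mirror bio_iter_iovec() and
bio_advance_iter(), but the kernel versions operate on a struct bio, so
treat this as an illustration of the mechanism, not the real
implementation:

#include <stdio.h>

struct bio_vec {
	void		*bv_page;	/* backing page (opaque in this sketch) */
	unsigned int	bv_len;		/* bytes covered by this vector */
	unsigned int	bv_offset;	/* byte offset into the page */
};

struct bvec_iter {
	unsigned int	bi_size;	/* bytes left in the request */
	unsigned int	bi_idx;		/* current index into the bvec array */
	unsigned int	bi_bvec_done;	/* bytes consumed of the current bvec */
};

/*
 * Compute the "current" bio_vec by value: page and offset come from the
 * array entry plus bi_bvec_done, length is clamped to what is left.
 * Nothing in the bvec array is ever written, which is what makes the
 * biovec immutable.
 */
static struct bio_vec bio_iter_iovec(const struct bio_vec *bvec,
				     struct bvec_iter iter)
{
	const struct bio_vec *bv = &bvec[iter.bi_idx];
	unsigned int left = bv->bv_len - iter.bi_bvec_done;

	return (struct bio_vec) {
		.bv_page   = bv->bv_page,
		.bv_len    = left < iter.bi_size ? left : iter.bi_size,
		.bv_offset = bv->bv_offset + iter.bi_bvec_done,
	};
}

/*
 * Consume @bytes by stepping bi_idx/bi_bvec_done.  No bounds checking
 * here; the caller must not advance past bi_size.
 */
static void bio_advance_iter(const struct bio_vec *bvec,
			     struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_size -= bytes;

	while (bytes) {
		unsigned int len = bvec[iter->bi_idx].bv_len - iter->bi_bvec_done;

		if (len > bytes)
			len = bytes;
		bytes -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == bvec[iter->bi_idx].bv_len) {
			iter->bi_bvec_done = 0;
			iter->bi_idx++;
		}
	}
}

int main(void)
{
	/* Two segments, 1536 bytes total. */
	struct bio_vec vec[2] = {
		{ .bv_page = NULL, .bv_len = 1024, .bv_offset = 0 },
		{ .bv_page = NULL, .bv_len = 512,  .bv_offset = 0 },
	};
	struct bvec_iter iter = { .bi_size = 1536 };

	/*
	 * Walk the biovec a 512-byte sector at a time, the same shape as
	 * the converted crypt_convert_block()/crypt_convert() loop: fetch
	 * the current bvec, use one sector of it, advance the iterator.
	 */
	while (iter.bi_size) {
		struct bio_vec bv = bio_iter_iovec(vec, iter);

		printf("idx=%u offset=%u avail=%u\n",
		       iter.bi_idx, bv.bv_offset, bv.bv_len);
		bio_advance_iter(vec, &iter, 512);
	}
	return 0;
}

Compiled as plain C, this prints one line per 512-byte step and crosses
the segment boundary without any of the offset_in/idx_in bookkeeping the
old dm-crypt code carried by hand.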
Diffstat (limited to 'drivers/md/dm-crypt.c')
 drivers/md/dm-crypt.c | 49 ++++++++++++++++++-------------------------------
 1 file changed, 18 insertions(+), 31 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1e2e5465d28e..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
 	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
-	unsigned int offset_in;
-	unsigned int offset_out;
-	unsigned int idx_in;
-	unsigned int idx_out;
+	struct bvec_iter iter_in;
+	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0;
+	if (bio_in)
+		ctx->iter_in = bio_in->bi_iter;
+	if (bio_out)
+		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
 				     struct convert_context *ctx,
 				     struct ablkcipher_request *req)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
 	int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
 	dmreq->iv_sector = ctx->cc_sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
-	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+		    bv_in.bv_offset);
 
 	sg_init_table(&dmreq->sg_out, 1);
-	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+		    bv_out.bv_offset);
 
-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
-	}
-
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
-	}
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
 	if (cc->iv_gen_ops) {
 		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 	atomic_set(&ctx->cc_pending, 1);
 
-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
 		crypt_alloc_req(cc, ctx);
 
@@ -1207,7 +1195,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	}
 
 	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+	BUG_ON(io->ctx.iter_out.bi_size);
 
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
@@ -1246,7 +1234,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	}
 
 	io->ctx.bio_out = clone;
-	io->ctx.idx_out = 0;
+	io->ctx.iter_out = clone->bi_iter;
 
 	remaining -= clone->bi_iter.bi_size;
 	sector += bio_sectors(clone);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
-			new_io->ctx.idx_in = io->ctx.idx_in;
-			new_io->ctx.offset_in = io->ctx.offset_in;
+			new_io->ctx.iter_in = io->ctx.iter_in;
 
 			/*
 			 * Fragments after the first use the base_io
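A side note on that last hunk: with the iterator as a small value type,
handing the unprocessed remainder of a bio to a follow-on request (the
write-fragment path above) collapses from copying idx/offset pairs by
hand into a single struct assignment. In terms of the userspace sketch
earlier (illustrative names, not kernel code):

	struct bvec_iter handoff = iter;	/* new_io resumes exactly here */
	/* the original iterator can keep advancing; the snapshot stays
	 * valid because the underlying bvec array is never modified */

This is the same property the new BUG_ON() relies on: once crypt_convert
has consumed the whole clone, iter_out.bi_size is simply zero.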