author     Alasdair G Kergon <agk@redhat.com>	2012-07-27 10:08:05 -0400
committer  Alasdair G Kergon <agk@redhat.com>	2012-07-27 10:08:05 -0400
commit     49a8a9204bb17296725058bbc7f31092d256be6e
tree       e63780cb17dad2ef375001833d73250c9adc07bb /drivers/md/dm-crypt.c
parent     fd2d231faf3ca25584d2320fdcd5a8b202342e46
dm crypt: store crypt_config instead of dm_target struct
Store the crypt_config struct pointer directly in struct dm_crypt_io
instead of the dm_target struct pointer.
Target information is never used; only target->private is referenced,
so the pointer can refer directly to struct crypt_config.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
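
To make the refactoring pattern concrete, here is a minimal compilable sketch
of the before/after access path. The *_old/*_new struct and helper names are
hypothetical, for illustration only, and dm_target is reduced to the single
field dm-crypt actually read:

	struct crypt_config;		/* opaque; defined in drivers/md/dm-crypt.c */

	struct dm_target {
		void *private;		/* target-specific context, set by the constructor */
	};

	/* Before this patch: reach the config through the target on every access. */
	struct dm_crypt_io_old {
		struct dm_target *target;
	};

	static struct crypt_config *io_cc_old(struct dm_crypt_io_old *io)
	{
		return io->target->private;	/* two loads: io->target, then ->private */
	}

	/* After this patch: cache the config pointer in the per-bio structure. */
	struct dm_crypt_io_new {
		struct crypt_config *cc;	/* assigned once at allocation time */
	};

	static struct crypt_config *io_cc_new(struct dm_crypt_io_new *io)
	{
		return io->cc;			/* one load, no dm_target dependency */
	}

Every helper in the diff below (crypt_dec_pending, crypt_endio, clone_init,
the kcryptd_* functions) follows this pattern, so the change saves one pointer
dereference per access and drops the unused coupling to struct dm_target.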
Diffstat (limited to 'drivers/md/dm-crypt.c')
 drivers/md/dm-crypt.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 711e0ac58665..8298eaee2c5a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -50,7 +50,7 @@ struct convert_context {
  * per bio private data
  */
 struct dm_crypt_io {
-	struct dm_target *target;
+	struct crypt_config *cc;
 	struct bio *base_bio;
 	struct work_struct work;
 
@@ -801,7 +801,7 @@ static int crypt_convert(struct crypt_config *cc,
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
 	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	bio_free(bio, cc->bs);
 }
@@ -815,7 +815,7 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 				      unsigned *out_of_pages)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -874,14 +874,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
-static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 					  struct bio *bio, sector_t sector)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
+	io->cc = cc;
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
@@ -903,7 +902,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
@@ -942,7 +941,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 static void crypt_endio(struct bio *clone, int error)
 {
 	struct dm_crypt_io *io = clone->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
 
 	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
@@ -969,7 +968,7 @@ static void crypt_endio(struct bio *clone, int error)
 
 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
@@ -980,7 +979,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
@@ -1028,7 +1027,7 @@ static void kcryptd_io(struct work_struct *work)
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	INIT_WORK(&io->work, kcryptd_io);
 	queue_work(cc->io_queue, &io->work);
@@ -1037,7 +1036,7 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1059,7 +1058,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	struct dm_crypt_io *new_io;
 	int crypt_finished;
@@ -1125,7 +1124,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * between fragments, so switch to a new dm_crypt_io structure.
 	 */
 	if (unlikely(!crypt_finished && remaining)) {
-		new_io = crypt_io_alloc(io->target, io->base_bio,
+		new_io = crypt_io_alloc(io->cc, io->base_bio,
 					sector);
 		crypt_inc_pending(new_io);
 		crypt_convert_init(cc, &new_io->ctx, NULL,
@@ -1159,7 +1158,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	int r = 0;
 
 	crypt_inc_pending(io);
@@ -1183,7 +1182,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	struct dm_crypt_request *dmreq = async_req->data;
 	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	if (error == -EINPROGRESS) {
 		complete(&ctx->restart);
@@ -1219,7 +1218,7 @@ static void kcryptd_crypt(struct work_struct *work)
 
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	INIT_WORK(&io->work, kcryptd_crypt);
 	queue_work(cc->crypt_queue, &io->work);
@@ -1708,7 +1707,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
 	struct dm_crypt_io *io;
-	struct crypt_config *cc;
+	struct crypt_config *cc = ti->private;
 
 	/*
 	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
@@ -1716,14 +1715,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	 * - for REQ_DISCARD caller must use flush if IO ordering matters
 	 */
 	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
-		cc = ti->private;
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
+	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))