author	Mikulas Patocka <mpatocka@redhat.com>	2014-03-28 15:51:55 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2014-08-01 12:30:35 -0400
commit	298a9fa08a1577211d42a75e8fc073baef61e0d9
tree	e327790b5722d41e79701ed788e99d2ac40b5222
parent	6a2414836154dc22b224c837ad7b862f78d595d1
dm crypt: use per-bio data
Change dm-crypt so that it uses auxiliary data allocated with the bio.
dm-crypt requires two allocations per request: a struct dm_crypt_io and a
struct ablkcipher_request (with other data appended to it). Previously,
both were allocated from mempools.
Some requests may require more dm_crypt_ios and ablkcipher_requests, but
most requests need just one of each of these two structures to complete.
This patch changes it so that the first dm_crypt_io and ablkcipher_request
are allocated with the bio (using the per_bio_data_size target option). If
a request needs additional structures, they are allocated from the mempool.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
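
For reference, the facility this patch builds on is the generic device-mapper
per-bio-data mechanism: a target declares in its constructor how much scratch
space it needs per bio, and device-mapper hands it a pre-allocated region
attached to the bio in the map function, so the fast path needs no separate
allocation. Below is a minimal sketch of that pattern for a hypothetical
target (the example_* names are illustrative, error handling is elided); it
assumes only the 3.x-era ti->per_bio_data_size field and dm_per_bio_data()
accessor that this patch itself uses:

#include <linux/device-mapper.h>

/* Hypothetical per-bio state, living in the bio's per-bio data area. */
struct example_io {
	sector_t sector;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Ask dm to allocate this much extra space with every incoming bio. */
	ti->per_bio_data_size = sizeof(struct example_io);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	/* No mempool_alloc() on the fast path: the state travels with the bio. */
	struct example_io *io = dm_per_bio_data(bio, sizeof(struct example_io));

	io->sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}

dm-crypt does the same thing in the diff below, but packs four items into the
region: the dm_crypt_io, the first ablkcipher_request with its transform
context (cc->dmreq_start bytes), the dm_crypt_request, and the IV.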
Diffstat (limited to 'drivers/md')
 drivers/md/dm-crypt.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4cba2d808afb..2785007e0e46 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -59,7 +59,7 @@ struct dm_crypt_io {
 	int error;
 	sector_t sector;
 	struct dm_crypt_io *base_io;
-};
+} CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
 	struct convert_context *ctx;
@@ -162,6 +162,8 @@ struct crypt_config {
 	 */
 	unsigned int dmreq_start;
 
+	unsigned int per_bio_data_size;
+
 	unsigned long flags;
 	unsigned int key_size;
 	unsigned int key_parts;      /* independent parts in key buffer */
@@ -895,6 +897,15 @@ static void crypt_alloc_req(struct crypt_config *cc,
 				     kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
+static void crypt_free_req(struct crypt_config *cc,
+			   struct ablkcipher_request *req, struct bio *base_bio)
+{
+	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
+
+	if ((struct ablkcipher_request *)(io + 1) != req)
+		mempool_free(req, cc->req_pool);
+}
+
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
@@ -1008,12 +1019,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
-static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
-					  struct bio *bio, sector_t sector)
+static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+			  struct bio *bio, sector_t sector)
 {
-	struct dm_crypt_io *io;
-
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->cc = cc;
 	io->base_bio = bio;
 	io->sector = sector;
@@ -1021,8 +1029,6 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
-
-	return io;
 }
 
 static void crypt_inc_pending(struct dm_crypt_io *io)
@@ -1046,8 +1052,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 		return;
 
 	if (io->ctx.req)
-		mempool_free(io->ctx.req, cc->req_pool);
-	mempool_free(io, cc->io_pool);
+		crypt_free_req(cc, io->ctx.req, base_bio);
+	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
+		mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
 		bio_endio(base_bio, error);
@@ -1255,8 +1262,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		 * between fragments, so switch to a new dm_crypt_io structure.
 		 */
 		if (unlikely(!crypt_finished && remaining)) {
-			new_io = crypt_io_alloc(io->cc, io->base_bio,
-						sector);
+			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
+			crypt_io_init(new_io, io->cc, io->base_bio, sector);
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
@@ -1325,7 +1332,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error < 0)
 		io->error = -EIO;
 
-	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
 	if (!atomic_dec_and_test(&ctx->cc_pending))
 		return;
@@ -1728,6 +1735,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
+	cc->per_bio_data_size = ti->per_bio_data_size =
+		sizeof(struct dm_crypt_io) + cc->dmreq_start +
+		sizeof(struct dm_crypt_request) + cc->iv_size;
+
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
@@ -1824,7 +1835,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+	io = dm_per_bio_data(bio, cc->per_bio_data_size);
+	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+	io->ctx.req = (struct ablkcipher_request *)(io + 1);
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
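
The free paths above work because the first ablkcipher_request always sits at
a fixed position, immediately after the dm_crypt_io, inside the per-bio area;
the CRYPTO_MINALIGN_ATTR added to the struct keeps that embedded request
adequately aligned for the crypto layer. crypt_free_req() and
crypt_dec_pending() therefore need only a pointer comparison to tell an
inline object from a mempool overflow allocation. A standalone userspace
sketch of the same "inline first, heap for overflow" idea (all names
hypothetical, malloc standing in for the mempool):

#include <stdio.h>
#include <stdlib.h>

struct io {                /* stands in for struct dm_crypt_io */
	int pending;
};

struct request {           /* stands in for struct ablkcipher_request */
	int payload;
};

/* The first request is carved out of the block right after the io. */
static struct request *alloc_req(struct io *io, int first)
{
	if (first)
		return (struct request *)(io + 1);   /* inline: no allocation */
	return malloc(sizeof(struct request));       /* overflow: "mempool" */
}

/* Mirrors crypt_free_req(): only non-inline requests are really freed. */
static void free_req(struct io *io, struct request *req)
{
	if ((struct request *)(io + 1) != req)
		free(req);
}

int main(void)
{
	/* One block holding the io plus its first request ("per-bio data"). */
	struct io *io = malloc(sizeof(struct io) + sizeof(struct request));

	struct request *r1 = alloc_req(io, 1);   /* inline */
	struct request *r2 = alloc_req(io, 0);   /* heap-allocated */

	printf("inline=%p overflow=%p\n", (void *)r1, (void *)r2);

	free_req(io, r1);   /* no-op: it lives inside io's block */
	free_req(io, r2);   /* actually freed */
	free(io);
	return 0;
}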