Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	61
1 file changed, 12 insertions(+), 49 deletions(-)
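In short: the patch deletes the duplicated per-CPU scratch structure and moves the in-flight ablkcipher_request into the per-I/O convert_context, so the request follows the I/O rather than the CPU it happened to start on. Both struct shapes below are transcribed from the hunks that follow; fields elided with "..." are unchanged by the patch.

	/* Before: one request slot per CPU, shared across all I/Os */
	struct crypt_cpu {
		struct ablkcipher_request *req;
	};

	/* After: the request slot lives in the per-I/O conversion context */
	struct convert_context {
		...
		sector_t cc_sector;
		atomic_t cc_pending;
		struct ablkcipher_request *req;
	};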
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
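For reference, this is how crypt_alloc_req() reads once the hunk above is applied: a straight transcription of the '+' side plus unchanged context, with comments added, no logic beyond the patch.

	static void crypt_alloc_req(struct crypt_config *cc,
				    struct convert_context *ctx)
	{
		/* Pick the tfm for this sector (tfms_count is a power of 2). */
		unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

		/* Allocate lazily: the request now lives in the per-I/O ctx
		 * and is reused across blocks until an async completion
		 * takes ownership of it. */
		if (!ctx->req)
			ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

		ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
		ablkcipher_request_set_callback(ctx->req,
		    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
	}

Design note: since the slot now sits in the per-I/O context rather than per-CPU data, the request stays valid no matter which CPU resumes the conversion, and the old per-CPU wrapper, which held only this one pointer, becomes unnecessary.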
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
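Taken together, the three crypt_convert() hunks establish an ownership rule for ctx->req: crypt_convert() holds the request while blocks complete synchronously, and hands it off (by NULLing the pointer) when a block goes asynchronous, so the next iteration allocates a fresh one. A condensed sketch of the loop after the patch follows; the lines between the hunks (loop condition, sync and error arms) are filled in from this revision of the function and should be read as surrounding context, not as part of the change.

	static int crypt_convert(struct crypt_config *cc,
				 struct convert_context *ctx)
	{
		int r;

		atomic_set(&ctx->cc_pending, 1);

		while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

			crypt_alloc_req(cc, ctx);	/* reuses ctx->req if still held */

			atomic_inc(&ctx->cc_pending);

			r = crypt_convert_block(cc, ctx, ctx->req);

			switch (r) {
			/* async: the completion path now owns the request */
			case -EBUSY:
				wait_for_completion(&ctx->restart);
				reinit_completion(&ctx->restart);
				/* fall through */
			case -EINPROGRESS:
				ctx->req = NULL;
				ctx->cc_sector++;
				continue;

			/* sync: keep the request for the next block */
			case 0:
				atomic_dec(&ctx->cc_pending);
				ctx->cc_sector++;
				cond_resched();
				continue;

			/* error */
			default:
				atomic_dec(&ctx->cc_pending);
				return r;
			}
		}

		return 0;
	}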
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
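These two small hunks close the request's lifecycle: crypt_io_alloc() starts every I/O with io->ctx.req = NULL, and crypt_dec_pending() frees whatever request is still attached once the last reference to the I/O drops. The case handled here is the synchronous one; when the conversion loop never handed the request off to an async completion, the pointer is still set at teardown. (The async case is already freed in kcryptd_async_done(), which this patch does not touch.)

	/* tail of crypt_dec_pending() after the patch */
	if (!atomic_dec_and_test(&io->io_pending))
		return;

	/* Request still attached: no async completion took ownership,
	 * so I/O teardown returns it to the mempool. The NULL init in
	 * crypt_io_alloc() makes this safe for I/Os that never
	 * allocated a request at all. */
	if (io->ctx.req)
		mempool_free(io->ctx.req, cc->req_pool);
	mempool_free(io, cc->io_pool);

This replaces the old cleanup in crypt_dtr(), which had to walk every possible CPU to return stray requests (the for_each_possible_cpu() loop removed further down).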
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.