path: root/drivers/md/dm-crypt.c
author     Andi Kleen <ak@linux.intel.com>        2011-01-13 14:59:53 -0500
committer  Alasdair G Kergon <agk@redhat.com>     2011-01-13 14:59:53 -0500
commit     c029772125594e31eb1a5ad9e0913724ed9891f2 (patch)
tree       f97dc6534b0d3af9447002fe7d08ff25dfffed92 /drivers/md/dm-crypt.c
parent     7dbcd137414f3877737802438926d6dba7906a9a (diff)
dm crypt: scale to multiple cpus
Currently dm-crypt does all the encryption work for a single dm-crypt
mapping in a single workqueue. This does not scale well when multiple CPUs
are submitting IO at a high rate. The single CPU running the single thread
cannot keep up with the encryption and encrypted IO performance tanks.

This patch changes the crypto workqueue to be per CPU. This means that as
long as the IO submitter (or the interrupt target CPUs for reads) runs on
different CPUs, the encryption work will also be done in parallel.

To avoid a bottleneck on the IO worker I also changed those to be per-CPU
threads.

There is still some shared data, so I suspect some bouncing cache lines.
But I haven't done a detailed study on that yet.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 254
1 file changed, 196 insertions(+), 58 deletions(-)
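
Before the diff itself: the patch leans on the kernel's generic per-CPU allocator to give each CPU its own cipher transform and in-flight request. A minimal sketch of that allocator pattern is shown below; it is illustrative only, not dm-crypt code, and all example_* names are hypothetical.

/* Minimal sketch of the per-CPU state pattern (hypothetical names). */
#include <linux/percpu.h>
#include <linux/errno.h>

struct example_state {
        unsigned long counter;
};

static struct example_state __percpu *example_state;

static int example_init(void)
{
        int cpu;

        /* One instance of struct example_state per possible CPU. */
        example_state = alloc_percpu(struct example_state);
        if (!example_state)
                return -ENOMEM;

        /* Initialize every CPU's copy, including currently offline CPUs. */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(example_state, cpu)->counter = 0;

        return 0;
}

static void example_touch(void)
{
        /* get_cpu_ptr() disables preemption and returns this CPU's copy. */
        struct example_state *s = get_cpu_ptr(example_state);

        s->counter++;
        put_cpu_ptr(example_state);
}

static void example_exit(void)
{
        free_percpu(example_state);
}
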
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9a896e1cb2ea..50ae6ef83738 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,6 +18,7 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -77,7 +78,6 @@ struct crypt_iv_operations {
 };
 
 struct iv_essiv_private {
-        struct crypto_cipher *tfm;
         struct crypto_hash *hash_tfm;
         u8 *salt;
 };
@@ -91,6 +91,22 @@ struct iv_benbi_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+/*
+ * Duplicated per-CPU state for cipher.
+ */
+struct crypt_cpu {
+        struct ablkcipher_request *req;
+        struct crypto_ablkcipher *tfm;
+
+        /* ESSIV: struct crypto_cipher *essiv_tfm */
+        void *iv_private;
+};
+
+/*
+ * The fields in here must be read only after initialization,
+ * changing state should be in crypt_cpu.
+ */
 struct crypt_config {
         struct dm_dev *dev;
         sector_t start;
@@ -119,6 +135,12 @@ struct crypt_config {
         unsigned int iv_size;
 
         /*
+         * Duplicated per cpu state. Access through
+         * per_cpu_ptr() only.
+         */
+        struct crypt_cpu __percpu *cpu;
+
+        /*
          * Layout of each crypto request:
          *
          * struct ablkcipher_request
@@ -132,9 +154,7 @@ struct crypt_config {
          * correctly aligned.
          */
         unsigned int dmreq_start;
-        struct ablkcipher_request *req;
 
-        struct crypto_ablkcipher *tfm;
         unsigned long flags;
         unsigned int key_size;
         u8 key[0];
@@ -149,6 +169,19 @@ static struct kmem_cache *_crypt_io_pool;
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 
+static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+{
+        return this_cpu_ptr(cc->cpu);
+}
+
+/*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+{
+        return __this_cpu_ptr(cc->cpu)->tfm;
+}
+
 /*
  * Different IV generation algorithms:
  *
@@ -195,7 +228,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
         struct hash_desc desc;
         struct scatterlist sg;
-        int err;
+        struct crypto_cipher *essiv_tfm;
+        int err, cpu;
 
         sg_init_one(&sg, cc->key, cc->key_size);
         desc.tfm = essiv->hash_tfm;
@@ -205,8 +239,16 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
         if (err)
                 return err;
 
-        return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+        for_each_possible_cpu(cpu) {
+                essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+
+                err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
                                     crypto_hash_digestsize(essiv->hash_tfm));
+                if (err)
+                        return err;
+        }
+
+        return 0;
 }
 
 /* Wipe salt and reset key derived from volume key */
@@ -214,24 +256,76 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
         unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+        struct crypto_cipher *essiv_tfm;
+        int cpu, r, err = 0;
 
         memset(essiv->salt, 0, salt_size);
 
-        return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+        for_each_possible_cpu(cpu) {
+                essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+                r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+                if (r)
+                        err = r;
+        }
+
+        return err;
+}
+
+/* Set up per cpu cipher state */
+static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
+                                             struct dm_target *ti,
+                                             u8 *salt, unsigned saltsize)
+{
+        struct crypto_cipher *essiv_tfm;
+        int err;
+
+        /* Setup the essiv_tfm with the given salt */
+        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(essiv_tfm)) {
+                ti->error = "Error allocating crypto tfm for ESSIV";
+                return essiv_tfm;
+        }
+
+        if (crypto_cipher_blocksize(essiv_tfm) !=
+            crypto_ablkcipher_ivsize(any_tfm(cc))) {
+                ti->error = "Block size of ESSIV cipher does "
+                            "not match IV size of block cipher";
+                crypto_free_cipher(essiv_tfm);
+                return ERR_PTR(-EINVAL);
+        }
+
+        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+        if (err) {
+                ti->error = "Failed to set key for ESSIV cipher";
+                crypto_free_cipher(essiv_tfm);
+                return ERR_PTR(err);
+        }
+
+        return essiv_tfm;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
+        int cpu;
+        struct crypt_cpu *cpu_cc;
+        struct crypto_cipher *essiv_tfm;
         struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-        crypto_free_cipher(essiv->tfm);
-        essiv->tfm = NULL;
-
         crypto_free_hash(essiv->hash_tfm);
         essiv->hash_tfm = NULL;
 
         kzfree(essiv->salt);
         essiv->salt = NULL;
+
+        for_each_possible_cpu(cpu) {
+                cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+                essiv_tfm = cpu_cc->iv_private;
+
+                if (essiv_tfm)
+                        crypto_free_cipher(essiv_tfm);
+
+                cpu_cc->iv_private = NULL;
+        }
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +334,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
         struct crypto_cipher *essiv_tfm = NULL;
         struct crypto_hash *hash_tfm = NULL;
         u8 *salt = NULL;
-        int err;
+        int err, cpu;
 
         if (!opts) {
                 ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,30 +356,22 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                 goto bad;
         }
 
-        /* Allocate essiv_tfm */
-        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-        if (IS_ERR(essiv_tfm)) {
-                ti->error = "Error allocating crypto tfm for ESSIV";
-                err = PTR_ERR(essiv_tfm);
-                goto bad;
-        }
-        if (crypto_cipher_blocksize(essiv_tfm) !=
-            crypto_ablkcipher_ivsize(cc->tfm)) {
-                ti->error = "Block size of ESSIV cipher does "
-                            "not match IV size of block cipher";
-                err = -EINVAL;
-                goto bad;
-        }
-
         cc->iv_gen_private.essiv.salt = salt;
-        cc->iv_gen_private.essiv.tfm = essiv_tfm;
         cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
+        for_each_possible_cpu(cpu) {
+                essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+                                        crypto_hash_digestsize(hash_tfm));
+                if (IS_ERR(essiv_tfm)) {
+                        crypt_iv_essiv_dtr(cc);
+                        return PTR_ERR(essiv_tfm);
+                }
+                per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+        }
+
         return 0;
 
 bad:
-        if (essiv_tfm && !IS_ERR(essiv_tfm))
-                crypto_free_cipher(essiv_tfm);
         if (hash_tfm && !IS_ERR(hash_tfm))
                 crypto_free_hash(hash_tfm);
         kfree(salt);
@@ -294,16 +380,19 @@ bad:
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
+        struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+
         memset(iv, 0, cc->iv_size);
         *(u64 *)iv = cpu_to_le64(sector);
-        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+        crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
+
         return 0;
 }
 
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                               const char *opts)
 {
-        unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
+        unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
         int log = ilog2(bs);
 
         /* we need to calculate how far we must shift the sector count
@@ -412,7 +501,7 @@ static int crypt_convert_block(struct crypt_config *cc,
 
         dmreq = dmreq_of_req(cc, req);
         iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
-                         crypto_ablkcipher_alignmask(cc->tfm) + 1);
+                         crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
 
         dmreq->ctx = ctx;
         sg_init_table(&dmreq->sg_in, 1);
@@ -454,16 +543,19 @@ static int crypt_convert_block(struct crypt_config *cc,
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
                                int error);
+
 static void crypt_alloc_req(struct crypt_config *cc,
                             struct convert_context *ctx)
 {
-        if (!cc->req)
-                cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-        ablkcipher_request_set_tfm(cc->req, cc->tfm);
-        ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                        CRYPTO_TFM_REQ_MAY_SLEEP,
-                                        kcryptd_async_done,
-                                        dmreq_of_req(cc, cc->req));
+        struct crypt_cpu *this_cc = this_crypt_config(cc);
+
+        if (!this_cc->req)
+                this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+        ablkcipher_request_set_tfm(this_cc->req, this_cc->tfm);
+        ablkcipher_request_set_callback(this_cc->req,
+            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+            kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
 }
 
 /*
@@ -472,6 +564,7 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
                          struct convert_context *ctx)
 {
+        struct crypt_cpu *this_cc = this_crypt_config(cc);
         int r;
 
         atomic_set(&ctx->pending, 1);
@@ -483,7 +576,7 @@ static int crypt_convert(struct crypt_config *cc,
 
                 atomic_inc(&ctx->pending);
 
-                r = crypt_convert_block(cc, ctx, cc->req);
+                r = crypt_convert_block(cc, ctx, this_cc->req);
 
                 switch (r) {
                 /* async */
@@ -492,7 +585,7 @@ static int crypt_convert(struct crypt_config *cc,
                         INIT_COMPLETION(ctx->restart);
                         /* fall through*/
                 case -EINPROGRESS:
-                        cc->req = NULL;
+                        this_cc->req = NULL;
                         ctx->sector++;
                         continue;
 
@@ -651,6 +744,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
  * They must be separated as otherwise the final stages could be
  * starved by new requests which can block in the first stages due
  * to memory allocation.
+ *
+ * The work is done per CPU global for all dm-crypt instances.
+ * They should not depend on each other and do not block.
  */
 static void crypt_endio(struct bio *clone, int error)
 {
@@ -971,6 +1067,20 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
         }
 }
 
+static int crypt_setkey_allcpus(struct crypt_config *cc)
+{
+        int cpu, err = 0, r;
+
+        for_each_possible_cpu(cpu) {
+                r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfm,
+                                             cc->key, cc->key_size);
+                if (r)
+                        err = r;
+        }
+
+        return err;
+}
+
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
         /* The key size may not be changed. */
@@ -986,19 +1096,22 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
 
         set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-        return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+        return crypt_setkey_allcpus(cc);
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
         clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
         memset(&cc->key, 0, cc->key_size * sizeof(u8));
-        return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+
+        return crypt_setkey_allcpus(cc);
 }
 
 static void crypt_dtr(struct dm_target *ti)
 {
         struct crypt_config *cc = ti->private;
+        struct crypt_cpu *cpu_cc;
+        int cpu;
 
         ti->private = NULL;
 
@@ -1010,6 +1123,15 @@ static void crypt_dtr(struct dm_target *ti)
         if (cc->crypt_queue)
                 destroy_workqueue(cc->crypt_queue);
 
+        if (cc->cpu)
+                for_each_possible_cpu(cpu) {
+                        cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+                        if (cpu_cc->req)
+                                mempool_free(cpu_cc->req, cc->req_pool);
+                        if (cpu_cc->tfm)
+                                crypto_free_ablkcipher(cpu_cc->tfm);
+                }
+
         if (cc->bs)
                 bioset_free(cc->bs);
 
@@ -1023,12 +1145,12 @@ static void crypt_dtr(struct dm_target *ti)
         if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                 cc->iv_gen_ops->dtr(cc);
 
-        if (cc->tfm && !IS_ERR(cc->tfm))
-                crypto_free_ablkcipher(cc->tfm);
-
         if (cc->dev)
                 dm_put_device(ti, cc->dev);
 
+        if (cc->cpu)
+                free_percpu(cc->cpu);
+
         kzfree(cc->cipher);
         kzfree(cc->cipher_string);
 
@@ -1040,9 +1162,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
                             char *cipher_in, char *key)
 {
         struct crypt_config *cc = ti->private;
+        struct crypto_ablkcipher *tfm;
         char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
         char *cipher_api = NULL;
-        int ret = -EINVAL;
+        int cpu, ret = -EINVAL;
 
         /* Convert to crypto api definition? */
         if (strchr(cipher_in, '(')) {
@@ -1072,6 +1195,12 @@ static int crypt_ctr_cipher(struct dm_target *ti,
         if (tmp)
                 DMWARN("Ignoring unexpected additional cipher options");
 
+        cc->cpu = alloc_percpu(struct crypt_cpu);
+        if (!cc->cpu) {
+                ti->error = "Cannot allocate per cpu state";
+                goto bad_mem;
+        }
+
         /*
          * For compatibility with the original dm-crypt mapping format, if
          * only the cipher name is supplied, use cbc-plain.
@@ -1098,11 +1227,14 @@ static int crypt_ctr_cipher(struct dm_target *ti,
         }
 
         /* Allocate cipher */
-        cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
-        if (IS_ERR(cc->tfm)) {
-                ret = PTR_ERR(cc->tfm);
-                ti->error = "Error allocating crypto tfm";
-                goto bad;
+        for_each_possible_cpu(cpu) {
+                tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
+                if (IS_ERR(tfm)) {
+                        ret = PTR_ERR(tfm);
+                        ti->error = "Error allocating crypto tfm";
+                        goto bad;
+                }
+                per_cpu_ptr(cc->cpu, cpu)->tfm = tfm;
         }
 
         /* Initialize and set key */
@@ -1113,7 +1245,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
         }
 
         /* Initialize IV */
-        cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+        cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
         if (cc->iv_size)
                 /* at least a 64 bit sector number should fit in our buffer */
                 cc->iv_size = max(cc->iv_size,
@@ -1208,9 +1340,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         }
 
         cc->dmreq_start = sizeof(struct ablkcipher_request);
-        cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
+        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
         cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-        cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
+        cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
                            ~(crypto_tfm_ctx_alignment() - 1);
 
         cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1351,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 ti->error = "Cannot allocate crypt request mempool";
                 goto bad;
         }
-        cc->req = NULL;
 
         cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
         if (!cc->page_pool) {
@@ -1252,13 +1383,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         cc->start = tmpll;
 
         ret = -ENOMEM;
-        cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+        cc->io_queue = alloc_workqueue("kcryptd_io",
+                                       WQ_NON_REENTRANT|
+                                       WQ_MEM_RECLAIM,
+                                       1);
         if (!cc->io_queue) {
                 ti->error = "Couldn't create kcryptd io queue";
                 goto bad;
         }
 
-        cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+        cc->crypt_queue = alloc_workqueue("kcryptd",
+                                          WQ_NON_REENTRANT|
+                                          WQ_CPU_INTENSIVE|
+                                          WQ_MEM_RECLAIM,
+                                          1);
         if (!cc->crypt_queue) {
                 ti->error = "Couldn't create kcryptd queue";
                 goto bad;
@@ -1418,7 +1556,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
         .name    = "crypt",
-        .version = {1, 8, 0},
+        .version = {1, 9, 0},
         .module  = THIS_MODULE,
         .ctr     = crypt_ctr,
         .dtr     = crypt_dtr,
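
A closing note on the workqueue change near the end of the diff: create_singlethread_workqueue() provides a single worker thread for the whole system, whereas alloc_workqueue() with max_active set to 1 (the last argument) allows one in-flight work item per CPU on a bound workqueue, so submitters on different CPUs no longer serialize behind one thread. The sketch below shows the new-style call under those assumptions; the names are hypothetical, and the WQ_NON_REENTRANT flag used in the patch is omitted here.

/* Illustrative sketch of the alloc_workqueue() call (hypothetical names). */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int example_create_queue(void)
{
        /*
         * WQ_CPU_INTENSIVE: do not count these work items against the
         * concurrency management of normal per-CPU workers.
         * WQ_MEM_RECLAIM: guarantee forward progress under memory pressure
         * (relevant because dm-crypt sits in the block I/O path).
         * max_active = 1: at most one work item in flight per CPU.
         */
        example_wq = alloc_workqueue("example_wq",
                                     WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
        if (!example_wq)
                return -ENOMEM;

        return 0;
}

static void example_destroy_queue(void)
{
        destroy_workqueue(example_wq);
}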