Diffstat (limited to 'drivers/md/dm-crypt.c')
 -rw-r--r--  drivers/md/dm-crypt.c | 626
 1 file changed, 514 insertions(+), 112 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 368e8e98f705..c8827ffd85bb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,10 +18,14 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
@@ -63,6 +67,7 @@ struct dm_crypt_request {
 	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
+	sector_t iv_sector;
 };
 
 struct crypt_config;
@@ -73,11 +78,13 @@ struct crypt_iv_operations {
 	void (*dtr)(struct crypt_config *cc);
 	int (*init)(struct crypt_config *cc);
 	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+	int (*generator)(struct crypt_config *cc, u8 *iv,
+			 struct dm_crypt_request *dmreq);
+	int (*post)(struct crypt_config *cc, u8 *iv,
+		    struct dm_crypt_request *dmreq);
 };
 
 struct iv_essiv_private {
-	struct crypto_cipher *tfm;
 	struct crypto_hash *hash_tfm;
 	u8 *salt;
 };
@@ -86,11 +93,32 @@ struct iv_benbi_private {
 	int shift;
 };
 
+#define LMK_SEED_SIZE 64 /* hash + 0 */
+struct iv_lmk_private {
+	struct crypto_shash *hash_tfm;
+	u8 *seed;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+/*
+ * Duplicated per-CPU state for cipher.
+ */
+struct crypt_cpu {
+	struct ablkcipher_request *req;
+	/* ESSIV: struct crypto_cipher *essiv_tfm */
+	void *iv_private;
+	struct crypto_ablkcipher *tfms[0];
+};
+
+/*
+ * The fields in here must be read only after initialization,
+ * changing state should be in crypt_cpu.
+ */
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
@@ -108,17 +136,25 @@ struct crypt_config {
 	struct workqueue_struct *crypt_queue;
 
 	char *cipher;
-	char *cipher_mode;
+	char *cipher_string;
 
 	struct crypt_iv_operations *iv_gen_ops;
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
+		struct iv_lmk_private lmk;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
 	/*
+	 * Duplicated per cpu state. Access through
+	 * per_cpu_ptr() only.
+	 */
+	struct crypt_cpu __percpu *cpu;
+	unsigned tfms_count;
+
+	/*
 	 * Layout of each crypto request:
 	 *
 	 *    struct ablkcipher_request
@@ -132,11 +168,10 @@ struct crypt_config {
 	 * correctly aligned.
 	 */
 	unsigned int dmreq_start;
-	struct ablkcipher_request *req;
 
-	struct crypto_ablkcipher *tfm;
 	unsigned long flags;
 	unsigned int key_size;
+	unsigned int key_parts;
 	u8 key[0];
 };
 
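The crypt_cpu structure added above follows the kernel's usual per-CPU duplication pattern from <linux/percpu.h>: one instance of the mutable state per possible CPU, so that the hot path needs no shared locks. A minimal sketch of that pattern, with a hypothetical example_state structure standing in for the real per-CPU data:

	/* Sketch only; 'example_state' and 'example_init' are hypothetical. */
	#include <linux/percpu.h>

	struct example_state {
		int scratch;
	};

	static struct example_state __percpu *state;

	static int example_init(void)
	{
		state = alloc_percpu(struct example_state);
		if (!state)
			return -ENOMEM;
		/* Each CPU sees its own copy; CPU-local use needs no locking. */
		this_cpu_ptr(state)->scratch = 0;
		return 0;
	}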
@@ -148,6 +183,20 @@ static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+
+static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+{
+	return this_cpu_ptr(cc->cpu);
+}
+
+/*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+{
+	return __this_cpu_ptr(cc->cpu)->tfms[0];
+}
 
 /*
  * Different IV generation algorithms:
@@ -168,23 +217,38 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
  * null: the initial vector is always zero.  Provides compatibility with
  *       obsolete loop_fish2 devices.  Do not use for new devices.
  *
+ * lmk:  Compatible implementation of the block chaining mode used
+ *       by the Loop-AES block device encryption system
+ *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
+ *       It operates on full 512 byte sectors and uses CBC
+ *       with an IV derived from the sector number, the data and
+ *       optionally an extra IV seed.
+ *       This means that after decryption the first block
+ *       of the sector must be tweaked according to the decrypted data.
+ *       Loop-AES can use three encryption schemes:
+ *         version 1: plain aes-cbc mode
+ *         version 2: uses a 64-key multikey scheme with the lmk IV generator
+ *         version 3: the same as version 2 with an additional IV seed
+ *                    (it uses 65 keys; the last key is used as the IV seed)
+ *
  * plumb: unimplemented, see:
  *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
 
 	return 0;
 }
 
 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				sector_t sector)
+				struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
 
 	return 0;
 }
@@ -195,7 +259,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	int err;
+	struct crypto_cipher *essiv_tfm;
+	int err, cpu;
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = essiv->hash_tfm;
@@ -205,8 +270,16 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	if (err)
 		return err;
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
-			crypto_hash_digestsize(essiv->hash_tfm));
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+
+		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
+				crypto_hash_digestsize(essiv->hash_tfm));
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 /* Wipe salt and reset key derived from volume key */
@@ -214,24 +287,76 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+	struct crypto_cipher *essiv_tfm;
+	int cpu, r, err = 0;
 
 	memset(essiv->salt, 0, salt_size);
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+		if (r)
+			err = r;
+	}
+
+	return err;
+}
+
+/* Set up per cpu cipher state */
+static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
+					     struct dm_target *ti,
+					     u8 *salt, unsigned saltsize)
+{
+	struct crypto_cipher *essiv_tfm;
+	int err;
+
+	/* Setup the essiv_tfm with the given salt */
+	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(essiv_tfm)) {
+		ti->error = "Error allocating crypto tfm for ESSIV";
+		return essiv_tfm;
+	}
+
+	if (crypto_cipher_blocksize(essiv_tfm) !=
+	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+		ti->error = "Block size of ESSIV cipher does "
+			    "not match IV size of block cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+	if (err) {
+		ti->error = "Failed to set key for ESSIV cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(err);
+	}
+
+	return essiv_tfm;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
+	int cpu;
+	struct crypt_cpu *cpu_cc;
+	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-	crypto_free_cipher(essiv->tfm);
-	essiv->tfm = NULL;
-
 	crypto_free_hash(essiv->hash_tfm);
 	essiv->hash_tfm = NULL;
 
 	kzfree(essiv->salt);
 	essiv->salt = NULL;
+
+	for_each_possible_cpu(cpu) {
+		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+		essiv_tfm = cpu_cc->iv_private;
+
+		if (essiv_tfm)
+			crypto_free_cipher(essiv_tfm);
+
+		cpu_cc->iv_private = NULL;
+	}
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +365,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
 	u8 *salt = NULL;
-	int err;
+	int err, cpu;
 
 	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,48 +387,44 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		goto bad;
 	}
 
-	/* Allocate essiv_tfm */
-	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(essiv_tfm)) {
-		ti->error = "Error allocating crypto tfm for ESSIV";
-		err = PTR_ERR(essiv_tfm);
-		goto bad;
-	}
-	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(cc->tfm)) {
-		ti->error = "Block size of ESSIV cipher does "
-			    "not match IV size of block cipher";
-		err = -EINVAL;
-		goto bad;
-	}
-
 	cc->iv_gen_private.essiv.salt = salt;
-	cc->iv_gen_private.essiv.tfm = essiv_tfm;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+					crypto_hash_digestsize(hash_tfm));
+		if (IS_ERR(essiv_tfm)) {
+			crypt_iv_essiv_dtr(cc);
+			return PTR_ERR(essiv_tfm);
+		}
+		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+	}
+
 	return 0;
 
 bad:
-	if (essiv_tfm && !IS_ERR(essiv_tfm))
-		crypto_free_cipher(essiv_tfm);
 	if (hash_tfm && !IS_ERR(hash_tfm))
 		crypto_free_hash(hash_tfm);
 	kfree(salt);
 	return err;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
+	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
+
 	return 0;
 }
 
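The ESSIV generator above implements the well-known construction: the per-sector IV is the sector number encrypted with a key derived by hashing the volume key. A conceptual standalone sketch (sha256() and aes256_encrypt_block() are hypothetical one-shot helpers, and the memcpy assumes a little-endian host):

	#include <stdint.h>
	#include <string.h>

	/* hypothetical helpers, declared only for illustration */
	void sha256(const uint8_t *in, size_t len, uint8_t out[32]);
	void aes256_encrypt_block(const uint8_t key[32], const uint8_t in[16],
				  uint8_t out[16]);

	void essiv_iv(const uint8_t *key, size_t key_len, uint64_t sector,
		      uint8_t iv[16])
	{
		uint8_t salt[32];

		sha256(key, key_len, salt);		/* salt = H(K) */
		memset(iv, 0, 16);
		memcpy(iv, &sector, sizeof(sector));	/* sector as IV seed */
		aes256_encrypt_block(salt, iv, iv);	/* IV = E_salt(sector) */
	}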
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
+	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -328,25 +449,177 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
 
 	return 0;
 }
 
+static void crypt_iv_lmk_dtr(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
+		crypto_free_shash(lmk->hash_tfm);
+	lmk->hash_tfm = NULL;
+
+	kzfree(lmk->seed);
+	lmk->seed = NULL;
+}
+
+static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(lmk->hash_tfm)) {
+		ti->error = "Error initializing LMK hash";
+		return PTR_ERR(lmk->hash_tfm);
+	}
+
+	/* No seed in LMK version 2 */
+	if (cc->key_parts == cc->tfms_count) {
+		lmk->seed = NULL;
+		return 0;
+	}
+
+	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
+	if (!lmk->seed) {
+		crypt_iv_lmk_dtr(cc);
+		ti->error = "Error kmallocing seed storage in LMK";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_iv_lmk_init(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	int subkey_size = cc->key_size / cc->key_parts;
+
+	/* LMK seed is on the position of LMK_KEYS + 1 key */
+	if (lmk->seed)
+		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
+		       crypto_shash_digestsize(lmk->hash_tfm));
+
+	return 0;
+}
+
+static int crypt_iv_lmk_wipe(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->seed)
+		memset(lmk->seed, 0, LMK_SEED_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq,
+			    u8 *data)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	struct {
+		struct shash_desc desc;
+		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
+	} sdesc;
+	struct md5_state md5state;
+	u32 buf[4];
+	int i, r;
+
+	sdesc.desc.tfm = lmk->hash_tfm;
+	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(&sdesc.desc);
+	if (r)
+		return r;
+
+	if (lmk->seed) {
+		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+		if (r)
+			return r;
+	}
+
+	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
+	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+	if (r)
+		return r;
+
+	/* Sector is cropped to 56 bits here */
+	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
+	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
+	buf[2] = cpu_to_le32(4024);
+	buf[3] = 0;
+	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+	if (r)
+		return r;
+
+	/* No MD5 padding here */
+	r = crypto_shash_export(&sdesc.desc, &md5state);
+	if (r)
+		return r;
+
+	for (i = 0; i < MD5_HASH_WORDS; i++)
+		__cpu_to_le32s(&md5state.hash[i]);
+	memcpy(iv, &md5state.hash, cc->iv_size);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	u8 *src;
+	int r = 0;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
+		kunmap_atomic(src, KM_USER0);
+	} else
+		memset(iv, 0, cc->iv_size);
+
+	return r;
+}
+
+static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
+{
+	u8 *dst;
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+		return 0;
+
+	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+
+	/* Tweak the first block of plaintext sector */
+	if (!r)
+		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+
+	kunmap_atomic(dst, KM_USER0);
+	return r;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
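The interesting detail in crypt_iv_lmk_one() above is how the sector number is folded into the MD5 input: it is cropped to 56 bits, a marker bit is set in the high word, and a fixed constant from the Loop-AES on-disk format follows. A standalone sketch of just that step (little-endian host assumed, so the cpu_to_le32 conversions are omitted):

	#include <stdint.h>

	static void lmk_sector_words(uint64_t sector, uint32_t buf[4])
	{
		buf[0] = (uint32_t)(sector & 0xFFFFFFFF);	/* low 32 bits */
		buf[1] = (uint32_t)(((sector >> 32) & 0x00FFFFFF)
				    | 0x80000000);		/* 24 bits + marker */
		buf[2] = 4024;		/* fixed constant from the Loop-AES format */
		buf[3] = 0;
	}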
@@ -373,6 +646,15 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
 	.generator = crypt_iv_null_gen
 };
 
+static struct crypt_iv_operations crypt_iv_lmk_ops = {
+	.ctr	   = crypt_iv_lmk_ctr,
+	.dtr	   = crypt_iv_lmk_dtr,
+	.init	   = crypt_iv_lmk_init,
+	.wipe	   = crypt_iv_lmk_wipe,
+	.generator = crypt_iv_lmk_gen,
+	.post	   = crypt_iv_lmk_post
+};
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -400,6 +682,13 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
+static u8 *iv_of_dmreq(struct crypt_config *cc,
+		       struct dm_crypt_request *dmreq)
+{
+	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
+		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -411,9 +700,9 @@ static int crypt_convert_block(struct crypt_config *cc,
 	int r = 0;
 
 	dmreq = dmreq_of_req(cc, req);
-	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
-			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
+	iv = iv_of_dmreq(cc, dmreq);
 
+	dmreq->iv_sector = ctx->sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -436,7 +725,7 @@ static int crypt_convert_block(struct crypt_config *cc,
 	}
 
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
 		if (r < 0)
 			return r;
 	}
@@ -449,21 +738,28 @@ static int crypt_convert_block(struct crypt_config *cc,
 	else
 		r = crypto_ablkcipher_decrypt(req);
 
+	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		r = cc->iv_gen_ops->post(cc, iv, dmreq);
+
 	return r;
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error);
+
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	if (!cc->req)
-		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-	ablkcipher_request_set_tfm(cc->req, cc->tfm);
-	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done,
-					dmreq_of_req(cc, cc->req));
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
+	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+
+	if (!this_cc->req)
+		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+	ablkcipher_request_set_callback(this_cc->req,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
 }
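The key selection in crypt_alloc_req() above is the heart of the multikey (Loop-AES) scheme: because tfms_count is constrained to a power of two, masking the low bits of the sector number is equivalent to sector % tfms_count, so consecutive sectors cycle through the keys. A standalone illustration:

	#include <stdint.h>

	/* valid only when tfms_count is a power of two */
	static unsigned key_for_sector(uint64_t sector, unsigned tfms_count)
	{
		return (unsigned)(sector & (tfms_count - 1));
	}

For example, with 64 keys, sector 0 uses key 0, sector 63 uses key 63, and sector 64 wraps around to key 0 again.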
 
 /*
@@ -472,6 +768,7 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->pending, 1);
@@ -483,7 +780,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->pending);
 
-		r = crypt_convert_block(cc, ctx, cc->req);
+		r = crypt_convert_block(cc, ctx, this_cc->req);
 
 		switch (r) {
 		/* async */
@@ -492,7 +789,7 @@ static int crypt_convert(struct crypt_config *cc,
 			INIT_COMPLETION(ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			cc->req = NULL;
+			this_cc->req = NULL;
 			ctx->sector++;
 			continue;
 
@@ -651,6 +948,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
  * They must be separated as otherwise the final stages could be
  * starved by new requests which can block in the first stages due
  * to memory allocation.
+ *
+ * The work is done per CPU global for all dm-crypt instances.
+ * They should not depend on each other and do not block.
  */
 static void crypt_endio(struct bio *clone, int error)
 {
@@ -691,25 +991,22 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_io_read(struct dm_crypt_io *io)
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	crypt_inc_pending(io);
-
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
-	if (unlikely(!clone)) {
-		io->error = -ENOMEM;
-		crypt_dec_pending(io);
-		return;
-	}
+	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	if (!clone)
+		return 1;
+
+	crypt_inc_pending(io);
 
 	clone_init(io, clone);
 	clone->bi_idx = 0;
@@ -720,6 +1017,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
+	return 0;
 }
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -732,9 +1030,12 @@ static void kcryptd_io(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		crypt_inc_pending(io);
+		if (kcryptd_io_read(io, GFP_NOIO))
+			io->error = -ENOMEM;
+		crypt_dec_pending(io);
+	} else
 		kcryptd_io_write(io);
 }
 
@@ -901,6 +1202,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
+	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
@@ -971,34 +1275,93 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 	}
 }
 
+static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+
+	for (i = 0; i < cc->tfms_count; i++)
+		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
+			crypto_free_ablkcipher(cpu_cc->tfms[i]);
+			cpu_cc->tfms[i] = NULL;
+		}
+}
+
+static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+	int err;
+
+	for (i = 0; i < cc->tfms_count; i++) {
+		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		if (IS_ERR(cpu_cc->tfms[i])) {
+			err = PTR_ERR(cpu_cc->tfms[i]);
+			crypt_free_tfms(cc, cpu);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int crypt_setkey_allcpus(struct crypt_config *cc)
+{
+	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+	int cpu, err = 0, i, r;
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < cc->tfms_count; i++) {
+			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
+						     cc->key + (i * subkey_size), subkey_size);
+			if (r)
+				err = r;
+		}
+	}
+
+	return err;
+}
+
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
-	unsigned key_size = strlen(key) >> 1;
+	int r = -EINVAL;
+	int key_string_len = strlen(key);
 
-	if (cc->key_size && cc->key_size != key_size)
-		return -EINVAL;
+	/* The key size may not be changed. */
+	if (cc->key_size != (key_string_len >> 1))
+		goto out;
 
-	cc->key_size = key_size; /* initial settings */
+	/* Hyphen (which gives a key_size of zero) means there is no key. */
+	if (!cc->key_size && strcmp(key, "-"))
+		goto out;
 
-	if ((!key_size && strcmp(key, "-")) ||
-	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
-		return -EINVAL;
+	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+		goto out;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+	r = crypt_setkey_allcpus(cc);
+
+out:
+	/* Hex key string not needed after here, so wipe it. */
+	memset(key, '0', key_string_len);
+
+	return r;
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+
+	return crypt_setkey_allcpus(cc);
 }
 
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
+	struct crypt_cpu *cpu_cc;
+	int cpu;
 
 	ti->private = NULL;
 
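crypt_setkey_allcpus() above relies on a simple key layout: the single key string supplied in the mapping table is cut into tfms_count equal subkeys, one per cipher transform. A standalone sketch of that split (assuming, as the driver enforces, that tfms_count divides key_size):

	#include <stddef.h>
	#include <stdint.h>

	static const uint8_t *subkey(const uint8_t *key, size_t key_size,
				     unsigned tfms_count, unsigned i)
	{
		size_t subkey_size = key_size / tfms_count;

		/* subkey i starts at offset i * subkey_size */
		return key + (size_t)i * subkey_size;
	}

For LMK version 3, one extra subkey slot (the 65th key) follows the last cipher key and is consumed as the IV seed by crypt_iv_lmk_init().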
@@ -1010,6 +1373,14 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
+	if (cc->cpu)
+		for_each_possible_cpu(cpu) {
+			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+			if (cpu_cc->req)
+				mempool_free(cpu_cc->req, cc->req_pool);
+			crypt_free_tfms(cc, cpu);
+		}
+
 	if (cc->bs)
 		bioset_free(cc->bs);
 
@@ -1023,14 +1394,14 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
-	if (cc->tfm && !IS_ERR(cc->tfm))
-		crypto_free_ablkcipher(cc->tfm);
-
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
+	if (cc->cpu)
+		free_percpu(cc->cpu);
+
 	kzfree(cc->cipher);
-	kzfree(cc->cipher_mode);
+	kzfree(cc->cipher_string);
 
 	/* Must zero key material before freeing */
 	kzfree(cc);
@@ -1040,9 +1411,9 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 			    char *cipher_in, char *key)
 {
 	struct crypt_config *cc = ti->private;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
-	int ret = -EINVAL;
+	int cpu, ret = -EINVAL;
 
 	/* Convert to crypto api definition? */
 	if (strchr(cipher_in, '(')) {
@@ -1050,23 +1421,31 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 		return -EINVAL;
 	}
 
+	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
+	if (!cc->cipher_string)
+		goto bad_mem;
+
 	/*
 	 * Legacy dm-crypt cipher specification
-	 * cipher-mode-iv:ivopts
+	 * cipher[:keycount]-mode-iv:ivopts
 	 */
 	tmp = cipher_in;
-	cipher = strsep(&tmp, "-");
+	keycount = strsep(&tmp, "-");
+	cipher = strsep(&keycount, ":");
+
+	if (!keycount)
+		cc->tfms_count = 1;
+	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+		 !is_power_of_2(cc->tfms_count)) {
+		ti->error = "Bad cipher key count specification";
+		return -EINVAL;
+	}
+	cc->key_parts = cc->tfms_count;
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
 		goto bad_mem;
 
-	if (tmp) {
-		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
-		if (!cc->cipher_mode)
-			goto bad_mem;
-	}
-
 	chainmode = strsep(&tmp, "-");
 	ivopts = strsep(&tmp, "-");
 	ivmode = strsep(&ivopts, ":");
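The parsing above handles the extended table syntax cipher[:keycount]-chainmode-ivmode[:ivopts], e.g. the Loop-AES-compatible spec "aes:64-cbc-lmk". A standalone illustration of the strsep() sequence the driver uses (strsep() is the glibc/BSD function):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char spec[] = "aes:64-cbc-lmk";
		char *tmp = spec;
		char *keycount  = strsep(&tmp, "-");	 /* "aes:64"               */
		char *cipher    = strsep(&keycount, ":");/* "aes", keycount -> "64" */
		char *chainmode = strsep(&tmp, "-");	 /* "cbc"                   */
		char *ivopts    = strsep(&tmp, "-");	 /* "lmk"                   */
		char *ivmode    = strsep(&ivopts, ":");	 /* "lmk", ivopts -> NULL   */

		printf("cipher=%s keycount=%s mode=%s iv=%s\n",
		       cipher, keycount, chainmode, ivmode);
		return 0;
	}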
@@ -1074,10 +1453,19 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	/* Compatibility mode for old dm-crypt mappings */
+	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
+				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+				 __alignof__(struct crypt_cpu));
+	if (!cc->cpu) {
+		ti->error = "Cannot allocate per cpu state";
+		goto bad_mem;
+	}
+
+	/*
+	 * For compatibility with the original dm-crypt mapping format, if
+	 * only the cipher name is supplied, use cbc-plain.
+	 */
 	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
-		kfree(cc->cipher_mode);
-		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
 		chainmode = "cbc";
 		ivmode = "plain";
 	}
@@ -1099,11 +1487,12 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}
 
 	/* Allocate cipher */
-	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
-	if (IS_ERR(cc->tfm)) {
-		ret = PTR_ERR(cc->tfm);
-		ti->error = "Error allocating crypto tfm";
-		goto bad;
+	for_each_possible_cpu(cpu) {
+		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
+		if (ret < 0) {
+			ti->error = "Error allocating crypto tfm";
+			goto bad;
+		}
 	}
 
 	/* Initialize and set key */
@@ -1114,7 +1503,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}
 
 	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
@@ -1137,7 +1526,15 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else {
+	else if (strcmp(ivmode, "lmk") == 0) {
+		cc->iv_gen_ops = &crypt_iv_lmk_ops;
+		/* Versions 2 and 3 are recognised according
+		 * to the length of the provided multi-key string.
+		 * If present (version 3), the last key is used as the IV seed.
+		 */
+		if (cc->key_size % cc->key_parts)
+			cc->key_parts++;
+	} else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
@@ -1194,6 +1591,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ti->error = "Cannot allocate encryption context";
 		return -ENOMEM;
 	}
+	cc->key_size = key_size;
 
 	ti->private = cc;
 	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1208,9 +1606,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
+	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
+	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
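The dmreq_start arithmetic above, together with iv_of_dmreq(), packs one allocation as [ablkcipher_request | tfm request ctx | padding | dm_crypt_request | IV], with the IV placed at the first suitably aligned byte. A sketch of the round-up-to-power-of-two-boundary step that both computations rely on:

	#include <stdint.h>

	/* round x up to the next multiple of a (a must be a power of two) */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	/* e.g. ALIGN_UP(13, 8) == 16, ALIGN_UP(16, 8) == 16 */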
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1617,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
-	cc->req = NULL;
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
@@ -1252,13 +1649,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->start = tmpll;
 
 	ret = -ENOMEM;
-	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_NON_REENTRANT|
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
-	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	cc->crypt_queue = alloc_workqueue("kcryptd",
+					  WQ_NON_REENTRANT|
+					  WQ_CPU_INTENSIVE|
+					  WQ_MEM_RECLAIM,
+					  1);
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
@@ -1278,7 +1682,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	struct dm_crypt_io *io;
 	struct crypt_config *cc;
 
-	if (unlikely(bio_empty_barrier(bio))) {
+	if (bio->bi_rw & REQ_FLUSH) {
 		cc = ti->private;
 		bio->bi_bdev = cc->dev->bdev;
 		return DM_MAPIO_REMAPPED;
@@ -1286,9 +1690,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 
 	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_queue_io(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		if (kcryptd_io_read(io, GFP_NOWAIT))
+			kcryptd_queue_io(io);
+	} else
 		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
@@ -1306,10 +1711,7 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
 		break;
 
 	case STATUSTYPE_TABLE:
-		if (cc->cipher_mode)
-			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
-		else
-			DMEMIT("%s ", cc->cipher);
+		DMEMIT("%s ", cc->cipher_string);
 
 		if (cc->key_size > 0) {
 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1421,7 +1823,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 7, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,