Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 486
1 file changed, 320 insertions(+), 166 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6b66ee46b87d..b04f98df94ea 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,11 +1,12 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | 2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | 3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> |
4 | * Copyright (C) 2006 Red Hat, Inc. All rights reserved. | 4 | * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This file is released under the GPL. | 6 | * This file is released under the GPL. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/completion.h> | ||
9 | #include <linux/err.h> | 10 | #include <linux/err.h> |
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
@@ -28,20 +29,10 @@
28 | #define MESG_STR(x) x, sizeof(x) | 29 | #define MESG_STR(x) x, sizeof(x) |
29 | 30 | ||
30 | /* | 31 | /* |
31 | * per bio private data | ||
32 | */ | ||
33 | struct dm_crypt_io { | ||
34 | struct dm_target *target; | ||
35 | struct bio *base_bio; | ||
36 | struct work_struct work; | ||
37 | atomic_t pending; | ||
38 | int error; | ||
39 | }; | ||
40 | |||
41 | /* | ||
42 | * context holding the current state of a multi-part conversion | 32 | * context holding the current state of a multi-part conversion |
43 | */ | 33 | */ |
44 | struct convert_context { | 34 | struct convert_context { |
35 | struct completion restart; | ||
45 | struct bio *bio_in; | 36 | struct bio *bio_in; |
46 | struct bio *bio_out; | 37 | struct bio *bio_out; |
47 | unsigned int offset_in; | 38 | unsigned int offset_in; |
@@ -49,7 +40,27 @@ struct convert_context {
49 | unsigned int idx_in; | 40 | unsigned int idx_in; |
50 | unsigned int idx_out; | 41 | unsigned int idx_out; |
51 | sector_t sector; | 42 | sector_t sector; |
52 | int write; | 43 | atomic_t pending; |
44 | }; | ||
45 | |||
46 | /* | ||
47 | * per bio private data | ||
48 | */ | ||
49 | struct dm_crypt_io { | ||
50 | struct dm_target *target; | ||
51 | struct bio *base_bio; | ||
52 | struct work_struct work; | ||
53 | |||
54 | struct convert_context ctx; | ||
55 | |||
56 | atomic_t pending; | ||
57 | int error; | ||
58 | sector_t sector; | ||
59 | }; | ||
60 | |||
61 | struct dm_crypt_request { | ||
62 | struct scatterlist sg_in; | ||
63 | struct scatterlist sg_out; | ||
53 | }; | 64 | }; |
54 | 65 | ||
55 | struct crypt_config; | 66 | struct crypt_config; |
@@ -72,10 +83,11 @@ struct crypt_config {
72 | sector_t start; | 83 | sector_t start; |
73 | 84 | ||
74 | /* | 85 | /* |
75 | * pool for per bio private data and | 86 | * pool for per bio private data, crypto requests and |
76 | * for encryption buffer pages | 87 | * encryption requests/buffer pages |
77 | */ | 88 | */ |
78 | mempool_t *io_pool; | 89 | mempool_t *io_pool; |
90 | mempool_t *req_pool; | ||
79 | mempool_t *page_pool; | 91 | mempool_t *page_pool; |
80 | struct bio_set *bs; | 92 | struct bio_set *bs; |
81 | 93 | ||
@@ -93,9 +105,25 @@ struct crypt_config {
93 | sector_t iv_offset; | 105 | sector_t iv_offset; |
94 | unsigned int iv_size; | 106 | unsigned int iv_size; |
95 | 107 | ||
108 | /* | ||
109 | * Layout of each crypto request: | ||
110 | * | ||
111 | * struct ablkcipher_request | ||
112 | * context | ||
113 | * padding | ||
114 | * struct dm_crypt_request | ||
115 | * padding | ||
116 | * IV | ||
117 | * | ||
118 | * The padding is added so that dm_crypt_request and the IV are | ||
119 | * correctly aligned. | ||
120 | */ | ||
121 | unsigned int dmreq_start; | ||
122 | struct ablkcipher_request *req; | ||
123 | |||
96 | char cipher[CRYPTO_MAX_ALG_NAME]; | 124 | char cipher[CRYPTO_MAX_ALG_NAME]; |
97 | char chainmode[CRYPTO_MAX_ALG_NAME]; | 125 | char chainmode[CRYPTO_MAX_ALG_NAME]; |
98 | struct crypto_blkcipher *tfm; | 126 | struct crypto_ablkcipher *tfm; |
99 | unsigned long flags; | 127 | unsigned long flags; |
100 | unsigned int key_size; | 128 | unsigned int key_size; |
101 | u8 key[0]; | 129 | u8 key[0]; |
@@ -108,6 +136,7 @@ struct crypt_config {
108 | static struct kmem_cache *_crypt_io_pool; | 136 | static struct kmem_cache *_crypt_io_pool; |
109 | 137 | ||
110 | static void clone_init(struct dm_crypt_io *, struct bio *); | 138 | static void clone_init(struct dm_crypt_io *, struct bio *); |
139 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); | ||
111 | 140 | ||
112 | /* | 141 | /* |
113 | * Different IV generation algorithms: | 142 | * Different IV generation algorithms: |
@@ -188,7 +217,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
188 | return PTR_ERR(essiv_tfm); | 217 | return PTR_ERR(essiv_tfm); |
189 | } | 218 | } |
190 | if (crypto_cipher_blocksize(essiv_tfm) != | 219 | if (crypto_cipher_blocksize(essiv_tfm) != |
191 | crypto_blkcipher_ivsize(cc->tfm)) { | 220 | crypto_ablkcipher_ivsize(cc->tfm)) { |
192 | ti->error = "Block size of ESSIV cipher does " | 221 | ti->error = "Block size of ESSIV cipher does " |
193 | "not match IV size of block cipher"; | 222 | "not match IV size of block cipher"; |
194 | crypto_free_cipher(essiv_tfm); | 223 | crypto_free_cipher(essiv_tfm); |
@@ -225,7 +254,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
225 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, | 254 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
226 | const char *opts) | 255 | const char *opts) |
227 | { | 256 | { |
228 | unsigned int bs = crypto_blkcipher_blocksize(cc->tfm); | 257 | unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); |
229 | int log = ilog2(bs); | 258 | int log = ilog2(bs); |
230 | 259 | ||
231 | /* we need to calculate how far we must shift the sector count | 260 | /* we need to calculate how far we must shift the sector count |
@@ -289,42 +318,10 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
289 | .generator = crypt_iv_null_gen | 318 | .generator = crypt_iv_null_gen |
290 | }; | 319 | }; |
291 | 320 | ||
292 | static int | ||
293 | crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, | ||
294 | struct scatterlist *in, unsigned int length, | ||
295 | int write, sector_t sector) | ||
296 | { | ||
297 | u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64)))); | ||
298 | struct blkcipher_desc desc = { | ||
299 | .tfm = cc->tfm, | ||
300 | .info = iv, | ||
301 | .flags = CRYPTO_TFM_REQ_MAY_SLEEP, | ||
302 | }; | ||
303 | int r; | ||
304 | |||
305 | if (cc->iv_gen_ops) { | ||
306 | r = cc->iv_gen_ops->generator(cc, iv, sector); | ||
307 | if (r < 0) | ||
308 | return r; | ||
309 | |||
310 | if (write) | ||
311 | r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); | ||
312 | else | ||
313 | r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); | ||
314 | } else { | ||
315 | if (write) | ||
316 | r = crypto_blkcipher_encrypt(&desc, out, in, length); | ||
317 | else | ||
318 | r = crypto_blkcipher_decrypt(&desc, out, in, length); | ||
319 | } | ||
320 | |||
321 | return r; | ||
322 | } | ||
323 | |||
324 | static void crypt_convert_init(struct crypt_config *cc, | 321 | static void crypt_convert_init(struct crypt_config *cc, |
325 | struct convert_context *ctx, | 322 | struct convert_context *ctx, |
326 | struct bio *bio_out, struct bio *bio_in, | 323 | struct bio *bio_out, struct bio *bio_in, |
327 | sector_t sector, int write) | 324 | sector_t sector) |
328 | { | 325 | { |
329 | ctx->bio_in = bio_in; | 326 | ctx->bio_in = bio_in; |
330 | ctx->bio_out = bio_out; | 327 | ctx->bio_out = bio_out; |
@@ -333,7 +330,79 @@ static void crypt_convert_init(struct crypt_config *cc,
333 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | 330 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; |
334 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | 331 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; |
335 | ctx->sector = sector + cc->iv_offset; | 332 | ctx->sector = sector + cc->iv_offset; |
336 | ctx->write = write; | 333 | init_completion(&ctx->restart); |
334 | /* | ||
335 | * Crypto operation can be asynchronous, | ||
336 | * ctx->pending is increased after request submission. | ||
337 | * We need to ensure that we don't call the crypt finish | ||
338 | * operation before pending got incremented | ||
339 | * (dependent on crypt submission return code). | ||
340 | */ | ||
341 | atomic_set(&ctx->pending, 2); | ||
342 | } | ||
343 | |||
344 | static int crypt_convert_block(struct crypt_config *cc, | ||
345 | struct convert_context *ctx, | ||
346 | struct ablkcipher_request *req) | ||
347 | { | ||
348 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | ||
349 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | ||
350 | struct dm_crypt_request *dmreq; | ||
351 | u8 *iv; | ||
352 | int r = 0; | ||
353 | |||
354 | dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start); | ||
355 | iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), | ||
356 | crypto_ablkcipher_alignmask(cc->tfm) + 1); | ||
357 | |||
358 | sg_init_table(&dmreq->sg_in, 1); | ||
359 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, | ||
360 | bv_in->bv_offset + ctx->offset_in); | ||
361 | |||
362 | sg_init_table(&dmreq->sg_out, 1); | ||
363 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, | ||
364 | bv_out->bv_offset + ctx->offset_out); | ||
365 | |||
366 | ctx->offset_in += 1 << SECTOR_SHIFT; | ||
367 | if (ctx->offset_in >= bv_in->bv_len) { | ||
368 | ctx->offset_in = 0; | ||
369 | ctx->idx_in++; | ||
370 | } | ||
371 | |||
372 | ctx->offset_out += 1 << SECTOR_SHIFT; | ||
373 | if (ctx->offset_out >= bv_out->bv_len) { | ||
374 | ctx->offset_out = 0; | ||
375 | ctx->idx_out++; | ||
376 | } | ||
377 | |||
378 | if (cc->iv_gen_ops) { | ||
379 | r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); | ||
380 | if (r < 0) | ||
381 | return r; | ||
382 | } | ||
383 | |||
384 | ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, | ||
385 | 1 << SECTOR_SHIFT, iv); | ||
386 | |||
387 | if (bio_data_dir(ctx->bio_in) == WRITE) | ||
388 | r = crypto_ablkcipher_encrypt(req); | ||
389 | else | ||
390 | r = crypto_ablkcipher_decrypt(req); | ||
391 | |||
392 | return r; | ||
393 | } | ||
394 | |||
395 | static void kcryptd_async_done(struct crypto_async_request *async_req, | ||
396 | int error); | ||
397 | static void crypt_alloc_req(struct crypt_config *cc, | ||
398 | struct convert_context *ctx) | ||
399 | { | ||
400 | if (!cc->req) | ||
401 | cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); | ||
402 | ablkcipher_request_set_tfm(cc->req, cc->tfm); | ||
403 | ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
404 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
405 | kcryptd_async_done, ctx); | ||
337 | } | 406 | } |
338 | 407 | ||
339 | /* | 408 | /* |
@@ -346,36 +415,38 @@ static int crypt_convert(struct crypt_config *cc,
346 | 415 | ||
347 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && | 416 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && |
348 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | 417 | ctx->idx_out < ctx->bio_out->bi_vcnt) { |
349 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | ||
350 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | ||
351 | struct scatterlist sg_in, sg_out; | ||
352 | |||
353 | sg_init_table(&sg_in, 1); | ||
354 | sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in); | ||
355 | |||
356 | sg_init_table(&sg_out, 1); | ||
357 | sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out); | ||
358 | 418 | ||
359 | ctx->offset_in += sg_in.length; | 419 | crypt_alloc_req(cc, ctx); |
360 | if (ctx->offset_in >= bv_in->bv_len) { | 420 | |
361 | ctx->offset_in = 0; | 421 | r = crypt_convert_block(cc, ctx, cc->req); |
362 | ctx->idx_in++; | 422 | |
423 | switch (r) { | ||
424 | case -EBUSY: | ||
425 | wait_for_completion(&ctx->restart); | ||
426 | INIT_COMPLETION(ctx->restart); | ||
427 | /* fall through*/ | ||
428 | case -EINPROGRESS: | ||
429 | atomic_inc(&ctx->pending); | ||
430 | cc->req = NULL; | ||
431 | r = 0; | ||
432 | /* fall through*/ | ||
433 | case 0: | ||
434 | ctx->sector++; | ||
435 | continue; | ||
363 | } | 436 | } |
364 | 437 | ||
365 | ctx->offset_out += sg_out.length; | 438 | break; |
366 | if (ctx->offset_out >= bv_out->bv_len) { | ||
367 | ctx->offset_out = 0; | ||
368 | ctx->idx_out++; | ||
369 | } | ||
370 | |||
371 | r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, | ||
372 | ctx->write, ctx->sector); | ||
373 | if (r < 0) | ||
374 | break; | ||
375 | |||
376 | ctx->sector++; | ||
377 | } | 439 | } |
378 | 440 | ||
441 | /* | ||
442 | * If there are pending crypto operation run async | ||
443 | * code. Otherwise process return code synchronously. | ||
444 | * The step of 2 ensures that async finish doesn't | ||
445 | * call crypto finish too early. | ||
446 | */ | ||
447 | if (atomic_sub_return(2, &ctx->pending)) | ||
448 | return -EINPROGRESS; | ||
449 | |||
379 | return r; | 450 | return r; |
380 | } | 451 | } |
381 | 452 | ||
@@ -455,18 +526,14 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
455 | * One of the bios was finished. Check for completion of | 526 | * One of the bios was finished. Check for completion of |
456 | * the whole request and correctly clean up the buffer. | 527 | * the whole request and correctly clean up the buffer. |
457 | */ | 528 | */ |
458 | static void crypt_dec_pending(struct dm_crypt_io *io, int error) | 529 | static void crypt_dec_pending(struct dm_crypt_io *io) |
459 | { | 530 | { |
460 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | 531 | struct crypt_config *cc = io->target->private; |
461 | |||
462 | if (error < 0) | ||
463 | io->error = error; | ||
464 | 532 | ||
465 | if (!atomic_dec_and_test(&io->pending)) | 533 | if (!atomic_dec_and_test(&io->pending)) |
466 | return; | 534 | return; |
467 | 535 | ||
468 | bio_endio(io->base_bio, io->error); | 536 | bio_endio(io->base_bio, io->error); |
469 | |||
470 | mempool_free(io, cc->io_pool); | 537 | mempool_free(io, cc->io_pool); |
471 | } | 538 | } |
472 | 539 | ||
@@ -484,30 +551,11 @@ static void crypt_dec_pending(struct dm_crypt_io *io, int error)
484 | * starved by new requests which can block in the first stages due | 551 | * starved by new requests which can block in the first stages due |
485 | * to memory allocation. | 552 | * to memory allocation. |
486 | */ | 553 | */ |
487 | static void kcryptd_do_work(struct work_struct *work); | ||
488 | static void kcryptd_do_crypt(struct work_struct *work); | ||
489 | |||
490 | static void kcryptd_queue_io(struct dm_crypt_io *io) | ||
491 | { | ||
492 | struct crypt_config *cc = io->target->private; | ||
493 | |||
494 | INIT_WORK(&io->work, kcryptd_do_work); | ||
495 | queue_work(cc->io_queue, &io->work); | ||
496 | } | ||
497 | |||
498 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) | ||
499 | { | ||
500 | struct crypt_config *cc = io->target->private; | ||
501 | |||
502 | INIT_WORK(&io->work, kcryptd_do_crypt); | ||
503 | queue_work(cc->crypt_queue, &io->work); | ||
504 | } | ||
505 | |||
506 | static void crypt_endio(struct bio *clone, int error) | 554 | static void crypt_endio(struct bio *clone, int error) |
507 | { | 555 | { |
508 | struct dm_crypt_io *io = clone->bi_private; | 556 | struct dm_crypt_io *io = clone->bi_private; |
509 | struct crypt_config *cc = io->target->private; | 557 | struct crypt_config *cc = io->target->private; |
510 | unsigned read_io = bio_data_dir(clone) == READ; | 558 | unsigned rw = bio_data_dir(clone); |
511 | 559 | ||
512 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) | 560 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
513 | error = -EIO; | 561 | error = -EIO; |
@@ -515,21 +563,20 @@ static void crypt_endio(struct bio *clone, int error)
515 | /* | 563 | /* |
516 | * free the processed pages | 564 | * free the processed pages |
517 | */ | 565 | */ |
518 | if (!read_io) { | 566 | if (rw == WRITE) |
519 | crypt_free_buffer_pages(cc, clone); | 567 | crypt_free_buffer_pages(cc, clone); |
520 | goto out; | 568 | |
569 | bio_put(clone); | ||
570 | |||
571 | if (rw == READ && !error) { | ||
572 | kcryptd_queue_crypt(io); | ||
573 | return; | ||
521 | } | 574 | } |
522 | 575 | ||
523 | if (unlikely(error)) | 576 | if (unlikely(error)) |
524 | goto out; | 577 | io->error = error; |
525 | |||
526 | bio_put(clone); | ||
527 | kcryptd_queue_crypt(io); | ||
528 | return; | ||
529 | 578 | ||
530 | out: | 579 | crypt_dec_pending(io); |
531 | bio_put(clone); | ||
532 | crypt_dec_pending(io, error); | ||
533 | } | 580 | } |
534 | 581 | ||
535 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) | 582 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
@@ -543,12 +590,11 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
543 | clone->bi_destructor = dm_crypt_bio_destructor; | 590 | clone->bi_destructor = dm_crypt_bio_destructor; |
544 | } | 591 | } |
545 | 592 | ||
546 | static void process_read(struct dm_crypt_io *io) | 593 | static void kcryptd_io_read(struct dm_crypt_io *io) |
547 | { | 594 | { |
548 | struct crypt_config *cc = io->target->private; | 595 | struct crypt_config *cc = io->target->private; |
549 | struct bio *base_bio = io->base_bio; | 596 | struct bio *base_bio = io->base_bio; |
550 | struct bio *clone; | 597 | struct bio *clone; |
551 | sector_t sector = base_bio->bi_sector - io->target->begin; | ||
552 | 598 | ||
553 | atomic_inc(&io->pending); | 599 | atomic_inc(&io->pending); |
554 | 600 | ||
@@ -559,7 +605,8 @@ static void process_read(struct dm_crypt_io *io)
559 | */ | 605 | */ |
560 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); | 606 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); |
561 | if (unlikely(!clone)) { | 607 | if (unlikely(!clone)) { |
562 | crypt_dec_pending(io, -ENOMEM); | 608 | io->error = -ENOMEM; |
609 | crypt_dec_pending(io); | ||
563 | return; | 610 | return; |
564 | } | 611 | } |
565 | 612 | ||
@@ -567,25 +614,71 @@ static void process_read(struct dm_crypt_io *io)
567 | clone->bi_idx = 0; | 614 | clone->bi_idx = 0; |
568 | clone->bi_vcnt = bio_segments(base_bio); | 615 | clone->bi_vcnt = bio_segments(base_bio); |
569 | clone->bi_size = base_bio->bi_size; | 616 | clone->bi_size = base_bio->bi_size; |
570 | clone->bi_sector = cc->start + sector; | 617 | clone->bi_sector = cc->start + io->sector; |
571 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), | 618 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
572 | sizeof(struct bio_vec) * clone->bi_vcnt); | 619 | sizeof(struct bio_vec) * clone->bi_vcnt); |
573 | 620 | ||
574 | generic_make_request(clone); | 621 | generic_make_request(clone); |
575 | } | 622 | } |
576 | 623 | ||
577 | static void process_write(struct dm_crypt_io *io) | 624 | static void kcryptd_io_write(struct dm_crypt_io *io) |
625 | { | ||
626 | struct bio *clone = io->ctx.bio_out; | ||
627 | |||
628 | generic_make_request(clone); | ||
629 | } | ||
630 | |||
631 | static void kcryptd_io(struct work_struct *work) | ||
632 | { | ||
633 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | ||
634 | |||
635 | if (bio_data_dir(io->base_bio) == READ) | ||
636 | kcryptd_io_read(io); | ||
637 | else | ||
638 | kcryptd_io_write(io); | ||
639 | } | ||
640 | |||
641 | static void kcryptd_queue_io(struct dm_crypt_io *io) | ||
578 | { | 642 | { |
579 | struct crypt_config *cc = io->target->private; | 643 | struct crypt_config *cc = io->target->private; |
580 | struct bio *base_bio = io->base_bio; | ||
581 | struct bio *clone; | ||
582 | struct convert_context ctx; | ||
583 | unsigned remaining = base_bio->bi_size; | ||
584 | sector_t sector = base_bio->bi_sector - io->target->begin; | ||
585 | 644 | ||
586 | atomic_inc(&io->pending); | 645 | INIT_WORK(&io->work, kcryptd_io); |
646 | queue_work(cc->io_queue, &io->work); | ||
647 | } | ||
587 | 648 | ||
588 | crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1); | 649 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, |
650 | int error, int async) | ||
651 | { | ||
652 | struct bio *clone = io->ctx.bio_out; | ||
653 | struct crypt_config *cc = io->target->private; | ||
654 | |||
655 | if (unlikely(error < 0)) { | ||
656 | crypt_free_buffer_pages(cc, clone); | ||
657 | bio_put(clone); | ||
658 | io->error = -EIO; | ||
659 | return; | ||
660 | } | ||
661 | |||
662 | /* crypt_convert should have filled the clone bio */ | ||
663 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); | ||
664 | |||
665 | clone->bi_sector = cc->start + io->sector; | ||
666 | io->sector += bio_sectors(clone); | ||
667 | |||
668 | if (async) | ||
669 | kcryptd_queue_io(io); | ||
670 | else { | ||
671 | atomic_inc(&io->pending); | ||
672 | generic_make_request(clone); | ||
673 | } | ||
674 | } | ||
675 | |||
676 | static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) | ||
677 | { | ||
678 | struct crypt_config *cc = io->target->private; | ||
679 | struct bio *clone; | ||
680 | unsigned remaining = io->base_bio->bi_size; | ||
681 | int r; | ||
589 | 682 | ||
590 | /* | 683 | /* |
591 | * The allocated buffers can be smaller than the whole bio, | 684 | * The allocated buffers can be smaller than the whole bio, |
@@ -594,70 +687,110 @@ static void process_write(struct dm_crypt_io *io)
594 | while (remaining) { | 687 | while (remaining) { |
595 | clone = crypt_alloc_buffer(io, remaining); | 688 | clone = crypt_alloc_buffer(io, remaining); |
596 | if (unlikely(!clone)) { | 689 | if (unlikely(!clone)) { |
597 | crypt_dec_pending(io, -ENOMEM); | 690 | io->error = -ENOMEM; |
598 | return; | 691 | return; |
599 | } | 692 | } |
600 | 693 | ||
601 | ctx.bio_out = clone; | 694 | io->ctx.bio_out = clone; |
602 | ctx.idx_out = 0; | 695 | io->ctx.idx_out = 0; |
603 | 696 | ||
604 | if (unlikely(crypt_convert(cc, &ctx) < 0)) { | ||
605 | crypt_free_buffer_pages(cc, clone); | ||
606 | bio_put(clone); | ||
607 | crypt_dec_pending(io, -EIO); | ||
608 | return; | ||
609 | } | ||
610 | |||
611 | /* crypt_convert should have filled the clone bio */ | ||
612 | BUG_ON(ctx.idx_out < clone->bi_vcnt); | ||
613 | |||
614 | clone->bi_sector = cc->start + sector; | ||
615 | remaining -= clone->bi_size; | 697 | remaining -= clone->bi_size; |
616 | sector += bio_sectors(clone); | ||
617 | 698 | ||
618 | /* Grab another reference to the io struct | 699 | r = crypt_convert(cc, &io->ctx); |
619 | * before we kick off the request */ | ||
620 | if (remaining) | ||
621 | atomic_inc(&io->pending); | ||
622 | 700 | ||
623 | generic_make_request(clone); | 701 | if (r != -EINPROGRESS) { |
624 | 702 | kcryptd_crypt_write_io_submit(io, r, 0); | |
625 | /* Do not reference clone after this - it | 703 | if (unlikely(r < 0)) |
626 | * may be gone already. */ | 704 | return; |
705 | } else | ||
706 | atomic_inc(&io->pending); | ||
627 | 707 | ||
628 | /* out of memory -> run queues */ | 708 | /* out of memory -> run queues */ |
629 | if (remaining) | 709 | if (unlikely(remaining)) |
630 | congestion_wait(WRITE, HZ/100); | 710 | congestion_wait(WRITE, HZ/100); |
631 | } | 711 | } |
632 | } | 712 | } |
633 | 713 | ||
634 | static void process_read_endio(struct dm_crypt_io *io) | 714 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
635 | { | 715 | { |
636 | struct crypt_config *cc = io->target->private; | 716 | struct crypt_config *cc = io->target->private; |
637 | struct convert_context ctx; | ||
638 | 717 | ||
639 | crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio, | 718 | /* |
640 | io->base_bio->bi_sector - io->target->begin, 0); | 719 | * Prevent io from disappearing until this function completes. |
720 | */ | ||
721 | atomic_inc(&io->pending); | ||
722 | |||
723 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); | ||
724 | kcryptd_crypt_write_convert_loop(io); | ||
641 | 725 | ||
642 | crypt_dec_pending(io, crypt_convert(cc, &ctx)); | 726 | crypt_dec_pending(io); |
643 | } | 727 | } |
644 | 728 | ||
645 | static void kcryptd_do_work(struct work_struct *work) | 729 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) |
646 | { | 730 | { |
647 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | 731 | if (unlikely(error < 0)) |
732 | io->error = -EIO; | ||
733 | |||
734 | crypt_dec_pending(io); | ||
735 | } | ||
736 | |||
737 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) | ||
738 | { | ||
739 | struct crypt_config *cc = io->target->private; | ||
740 | int r = 0; | ||
741 | |||
742 | atomic_inc(&io->pending); | ||
743 | |||
744 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, | ||
745 | io->sector); | ||
746 | |||
747 | r = crypt_convert(cc, &io->ctx); | ||
748 | |||
749 | if (r != -EINPROGRESS) | ||
750 | kcryptd_crypt_read_done(io, r); | ||
751 | |||
752 | crypt_dec_pending(io); | ||
753 | } | ||
754 | |||
755 | static void kcryptd_async_done(struct crypto_async_request *async_req, | ||
756 | int error) | ||
757 | { | ||
758 | struct convert_context *ctx = async_req->data; | ||
759 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); | ||
760 | struct crypt_config *cc = io->target->private; | ||
761 | |||
762 | if (error == -EINPROGRESS) { | ||
763 | complete(&ctx->restart); | ||
764 | return; | ||
765 | } | ||
766 | |||
767 | mempool_free(ablkcipher_request_cast(async_req), cc->req_pool); | ||
768 | |||
769 | if (!atomic_dec_and_test(&ctx->pending)) | ||
770 | return; | ||
648 | 771 | ||
649 | if (bio_data_dir(io->base_bio) == READ) | 772 | if (bio_data_dir(io->base_bio) == READ) |
650 | process_read(io); | 773 | kcryptd_crypt_read_done(io, error); |
774 | else | ||
775 | kcryptd_crypt_write_io_submit(io, error, 1); | ||
651 | } | 776 | } |
652 | 777 | ||
653 | static void kcryptd_do_crypt(struct work_struct *work) | 778 | static void kcryptd_crypt(struct work_struct *work) |
654 | { | 779 | { |
655 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | 780 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
656 | 781 | ||
657 | if (bio_data_dir(io->base_bio) == READ) | 782 | if (bio_data_dir(io->base_bio) == READ) |
658 | process_read_endio(io); | 783 | kcryptd_crypt_read_convert(io); |
659 | else | 784 | else |
660 | process_write(io); | 785 | kcryptd_crypt_write_convert(io); |
786 | } | ||
787 | |||
788 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) | ||
789 | { | ||
790 | struct crypt_config *cc = io->target->private; | ||
791 | |||
792 | INIT_WORK(&io->work, kcryptd_crypt); | ||
793 | queue_work(cc->crypt_queue, &io->work); | ||
661 | } | 794 | } |
662 | 795 | ||
663 | /* | 796 | /* |
@@ -733,7 +866,7 @@ static int crypt_wipe_key(struct crypt_config *cc)
733 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | 866 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
734 | { | 867 | { |
735 | struct crypt_config *cc; | 868 | struct crypt_config *cc; |
736 | struct crypto_blkcipher *tfm; | 869 | struct crypto_ablkcipher *tfm; |
737 | char *tmp; | 870 | char *tmp; |
738 | char *cipher; | 871 | char *cipher; |
739 | char *chainmode; | 872 | char *chainmode; |
@@ -787,7 +920,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
787 | goto bad_cipher; | 920 | goto bad_cipher; |
788 | } | 921 | } |
789 | 922 | ||
790 | tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); | 923 | tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); |
791 | if (IS_ERR(tfm)) { | 924 | if (IS_ERR(tfm)) { |
792 | ti->error = "Error allocating crypto tfm"; | 925 | ti->error = "Error allocating crypto tfm"; |
793 | goto bad_cipher; | 926 | goto bad_cipher; |
@@ -821,7 +954,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
821 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | 954 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) |
822 | goto bad_ivmode; | 955 | goto bad_ivmode; |
823 | 956 | ||
824 | cc->iv_size = crypto_blkcipher_ivsize(tfm); | 957 | cc->iv_size = crypto_ablkcipher_ivsize(tfm); |
825 | if (cc->iv_size) | 958 | if (cc->iv_size) |
826 | /* at least a 64 bit sector number should fit in our buffer */ | 959 | /* at least a 64 bit sector number should fit in our buffer */ |
827 | cc->iv_size = max(cc->iv_size, | 960 | cc->iv_size = max(cc->iv_size, |
@@ -841,6 +974,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
841 | goto bad_slab_pool; | 974 | goto bad_slab_pool; |
842 | } | 975 | } |
843 | 976 | ||
977 | cc->dmreq_start = sizeof(struct ablkcipher_request); | ||
978 | cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); | ||
979 | cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); | ||
980 | cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & | ||
981 | ~(crypto_tfm_ctx_alignment() - 1); | ||
982 | |||
983 | cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + | ||
984 | sizeof(struct dm_crypt_request) + cc->iv_size); | ||
985 | if (!cc->req_pool) { | ||
986 | ti->error = "Cannot allocate crypt request mempool"; | ||
987 | goto bad_req_pool; | ||
988 | } | ||
989 | cc->req = NULL; | ||
990 | |||
844 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); | 991 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
845 | if (!cc->page_pool) { | 992 | if (!cc->page_pool) { |
846 | ti->error = "Cannot allocate page mempool"; | 993 | ti->error = "Cannot allocate page mempool"; |
@@ -853,7 +1000,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
853 | goto bad_bs; | 1000 | goto bad_bs; |
854 | } | 1001 | } |
855 | 1002 | ||
856 | if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { | 1003 | if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { |
857 | ti->error = "Error setting key"; | 1004 | ti->error = "Error setting key"; |
858 | goto bad_device; | 1005 | goto bad_device; |
859 | } | 1006 | } |
@@ -914,12 +1061,14 @@ bad_device:
914 | bad_bs: | 1061 | bad_bs: |
915 | mempool_destroy(cc->page_pool); | 1062 | mempool_destroy(cc->page_pool); |
916 | bad_page_pool: | 1063 | bad_page_pool: |
1064 | mempool_destroy(cc->req_pool); | ||
1065 | bad_req_pool: | ||
917 | mempool_destroy(cc->io_pool); | 1066 | mempool_destroy(cc->io_pool); |
918 | bad_slab_pool: | 1067 | bad_slab_pool: |
919 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | 1068 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
920 | cc->iv_gen_ops->dtr(cc); | 1069 | cc->iv_gen_ops->dtr(cc); |
921 | bad_ivmode: | 1070 | bad_ivmode: |
922 | crypto_free_blkcipher(tfm); | 1071 | crypto_free_ablkcipher(tfm); |
923 | bad_cipher: | 1072 | bad_cipher: |
924 | /* Must zero key material before freeing */ | 1073 | /* Must zero key material before freeing */ |
925 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | 1074 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); |
@@ -934,14 +1083,18 @@ static void crypt_dtr(struct dm_target *ti)
934 | destroy_workqueue(cc->io_queue); | 1083 | destroy_workqueue(cc->io_queue); |
935 | destroy_workqueue(cc->crypt_queue); | 1084 | destroy_workqueue(cc->crypt_queue); |
936 | 1085 | ||
1086 | if (cc->req) | ||
1087 | mempool_free(cc->req, cc->req_pool); | ||
1088 | |||
937 | bioset_free(cc->bs); | 1089 | bioset_free(cc->bs); |
938 | mempool_destroy(cc->page_pool); | 1090 | mempool_destroy(cc->page_pool); |
1091 | mempool_destroy(cc->req_pool); | ||
939 | mempool_destroy(cc->io_pool); | 1092 | mempool_destroy(cc->io_pool); |
940 | 1093 | ||
941 | kfree(cc->iv_mode); | 1094 | kfree(cc->iv_mode); |
942 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | 1095 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
943 | cc->iv_gen_ops->dtr(cc); | 1096 | cc->iv_gen_ops->dtr(cc); |
944 | crypto_free_blkcipher(cc->tfm); | 1097 | crypto_free_ablkcipher(cc->tfm); |
945 | dm_put_device(ti, cc->dev); | 1098 | dm_put_device(ti, cc->dev); |
946 | 1099 | ||
947 | /* Must zero key material before freeing */ | 1100 | /* Must zero key material before freeing */ |
@@ -958,6 +1111,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
958 | io = mempool_alloc(cc->io_pool, GFP_NOIO); | 1111 | io = mempool_alloc(cc->io_pool, GFP_NOIO); |
959 | io->target = ti; | 1112 | io->target = ti; |
960 | io->base_bio = bio; | 1113 | io->base_bio = bio; |
1114 | io->sector = bio->bi_sector - ti->begin; | ||
961 | io->error = 0; | 1115 | io->error = 0; |
962 | atomic_set(&io->pending, 0); | 1116 | atomic_set(&io->pending, 0); |
963 | 1117 | ||