Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r-- | drivers/md/dm-crypt.c | 91 |
1 file changed, 40 insertions(+), 51 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c347..7b0fcfc9eaa5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
 struct crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
-	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
 	int error;
@@ -107,6 +106,8 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
+static void clone_init(struct crypt_io *, struct bio *);
+
 /*
  * Different IV generation algorithms:
  *
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
  * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
  *        (needed for LRW-32-AES and possible other narrow block modes)
  *
+ * null: the initial vector is always zero.  Provides compatibility with
+ *       obsolete loop_fish2 devices.  Do not use for new devices.
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 	return 0;
 }
 
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	memset(iv, 0, cc->iv_size);
+
+	return 0;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
 	.generator = crypt_iv_benbi_gen
 };
 
+static struct crypt_iv_operations crypt_iv_null_ops = {
+	.generator = crypt_iv_null_gen
+};
+
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
 			  struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
-		   struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
+	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;
 
-	if (base_bio) {
-		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
-		__bio_clone(clone, base_bio);
-	} else
-		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
-	clone->bi_destructor = dm_crypt_bio_destructor;
-
-	/* if the last bio was not complete, continue where that one ended */
-	clone->bi_idx = *bio_vec_idx;
-	clone->bi_vcnt = *bio_vec_idx;
-	clone->bi_size = 0;
-	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-	/* clone->bi_idx pages have already been allocated */
-	size -= clone->bi_idx * PAGE_SIZE;
+	clone_init(io, clone);
 
-	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+	for (i = 0; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		return NULL;
 	}
 
-	/*
-	 * Remember the last bio_vec allocated to be able
-	 * to correctly continue after the splitting.
-	 */
-	*bio_vec_idx = clone->bi_vcnt;
-
 	return clone;
 }
 
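The two hunks above simplify an allocation policy worth spelling out: the first MIN_BIO_PAGES page allocations may sleep (GFP_NOIO), which guarantees forward progress, while later ones drop __GFP_WAIT and add __GFP_NOWARN so a tight mempool yields a shorter bio instead of a stall; the write path then resubmits for the remainder. A standalone C sketch of that two-phase loop, with a fake allocator standing in for the page mempool (all names here are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_BIO_PAGES 4
    #define PAGE_SIZE     4096

    static unsigned budget = 2;    /* pages available without sleeping */

    /* Stand-in for mempool_alloc(): fails fast once the cheap pages are
     * gone, unless the caller is still allowed to sleep for more. */
    static void *page_alloc(int may_sleep)
    {
        if (budget == 0 && !may_sleep)
            return NULL;
        if (budget > 0)
            budget--;
        return malloc(PAGE_SIZE);
    }

    /* Two-phase policy: sleep for the first MIN_BIO_PAGES pages, then go
     * best-effort and return however many pages were actually obtained. */
    static unsigned alloc_buffer_pages(void **pages, unsigned nr_iovecs)
    {
        int may_sleep = 1;                  /* __GFP_WAIT still set */
        unsigned i;

        for (i = 0; i < nr_iovecs; i++) {
            pages[i] = page_alloc(may_sleep);
            if (!pages[i])
                break;                      /* partial buffer; caller retries */
            if (i == MIN_BIO_PAGES - 1)
                may_sleep = 0;              /* ~__GFP_WAIT | __GFP_NOWARN */
        }
        return i;
    }

    int main(void)
    {
        void *pages[16];
        unsigned i, got = alloc_buffer_pages(pages, 16);

        /* prints 4: two cheap pages, two we "slept" for, then the first
         * best-effort allocation fails and we stop early */
        printf("allocated %u of 16 pages\n", got);
        for (i = 0; i < got; i++)
            free(pages[i]);
        return 0;
    }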
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (io->first_clone)
-		bio_put(io->first_clone);
-
 	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
 
 	mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
 	clone->bi_rw = io->base_bio->bi_rw;
+	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
 	}
 
 	clone_init(io, clone);
-	clone->bi_destructor = dm_crypt_bio_destructor;
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
 	clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
 	struct convert_context ctx;
 	unsigned remaining = base_bio->bi_size;
 	sector_t sector = base_bio->bi_sector - io->target->begin;
-	unsigned bvec_idx = 0;
 
 	atomic_inc(&io->pending);
 
@@ -615,14 +605,14 @@
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
-					   io->first_clone, &bvec_idx);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
 		}
 
 		ctx.bio_out = clone;
+		ctx.idx_out = 0;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@
 			return;
 		}
 
-		clone_init(io, clone);
-		clone->bi_sector = cc->start + sector;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
 
+		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
 
-		/* prevent bio_put of first_clone */
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
 		if (remaining)
 			atomic_inc(&io->pending);
 
 		generic_make_request(clone);
 
+		/* Do not reference clone after this - it
+		 * may be gone already. */
+
 		/* out of memory -> run queues */
 		if (remaining)
-			congestion_wait(bio_data_dir(clone), HZ/100);
+			congestion_wait(WRITE, HZ/100);
 	}
 }
 
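With first_clone gone, the only thing keeping the crypt_io alive while clones are in flight is the pending counter touched above, and the invariant is easy to lose in diff form: process_write holds one reference from before the loop, takes one extra for every clone except the last, and each clone's completion drops one via dec_pending, so whichever completion brings the count to zero ends the base bio. A standalone sketch of that accounting, using a plain int for atomic_t and illustrative names:

    #include <stdio.h>

    struct io {
        int pending;    /* atomic_t in the kernel */
        int done;
    };

    /* Mirrors dec_pending(): the last reference ends the base bio. */
    static void dec_pending(struct io *io)
    {
        if (--io->pending)
            return;
        io->done = 1;   /* bio_endio(io->base_bio, ...) */
    }

    int main(void)
    {
        struct io io = { 0, 0 };
        int i, clones = 3;

        io.pending++;               /* taken before the write loop */
        for (i = 0; i < clones; i++) {
            if (i < clones - 1)
                io.pending++;       /* "if (remaining) atomic_inc()" */
            /* generic_make_request(clone) would go here; the clone
             * must not be referenced afterwards */
        }

        /* completions may run in any order, one per clone */
        for (i = 0; i < clones; i++)
            dec_pending(&io);

        printf("done=%d pending=%d\n", io.done, io.pending); /* done=1 pending=0 */
        return 0;
    }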
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
 	else if (strcmp(ivmode, "benbi") == 0)
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
+	else if (strcmp(ivmode, "null") == 0)
+		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else {
 		ti->error = "Invalid IV mode";
 		goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	struct crypt_config *cc = ti->private;
 	struct crypt_io *io;
 
+	if (bio_barrier(bio))
+		return -EOPNOTSUPP;
+
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->first_clone = NULL;
 	io->error = io->post_process = 0;
 	atomic_set(&io->pending, 0);
 	kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version= {1, 3, 0},
+	.version= {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
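For completeness, the new "null" IV mode is selected through the ivmode suffix of the cipher field in a dm table. An illustrative line with placeholder key, size, and device (a real loop_fish2 migration would name whatever cipher that volume was created with):

    0 409600 crypt aes-cbc-null <hex key> 0 /dev/sdb1 0

Because crypt_iv_null_gen just zeroes the IV, identical plaintext sectors encrypt to identical ciphertext across the whole device; that is exactly the watermarking weakness the plain and essiv generators exist to avoid, hence the comment's warning against using it for new devices.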