path: root/drivers/md/dm-crypt.c
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	| 109
1 file changed, 66 insertions(+), 43 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81..682ef9e6acd3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
 	ctx->sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
-	atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
 	int r;
 
+	atomic_set(&ctx->pending, 1);
+
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
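The two hunks above move the initialization of ctx->pending out of crypt_convert_init() and into crypt_convert() itself, so the counter is re-armed at the start of every conversion pass rather than only when the context is first set up. The counter follows the usual completion-counting idiom: it starts at 1 (the submitter's own reference), each in-flight operation adds one, and the final decrement tells the submitter whether async work is still pending. A minimal sketch of that idiom, with hypothetical helpers (have_more_work(), do_async_op()) standing in for the real crypto calls:

	/* Sketch only: hypothetical helpers; kernel atomic_t API. */
	struct demo_ctx {
		atomic_t pending;
	};

	static int demo_convert(struct demo_ctx *ctx)
	{
		/* start at 1: the submitter's own reference */
		atomic_set(&ctx->pending, 1);

		while (have_more_work(ctx)) {
			atomic_inc(&ctx->pending);	/* one per in-flight op */
			do_async_op(ctx);	/* completion handler decrements */
		}

		/*
		 * Drop the submitter's reference. A true result means all
		 * operations already completed synchronously.
		 */
		return atomic_dec_and_test(&ctx->pending);
	}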
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
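With the out_of_pages out-parameter, crypt_alloc_buffer() can now tell its caller why a returned bio is smaller than requested: only a mempool_alloc() failure sets the flag, so the write path can throttle on genuine page exhaustion and skip the wait when the bio was split for other reasons. A sketch of the same allocate-what-you-can pattern, using hypothetical buf_*()/pool_*() helpers:

	/* Sketch only: hypothetical buf/pool helpers. The point is the
	 * out-parameter that says *why* the buffer came up short. */
	static struct buf *alloc_up_to(struct page_pool *pool, unsigned nr_pages,
				       unsigned *out_of_pages)
	{
		struct buf *buf = buf_alloc();
		unsigned i;

		*out_of_pages = 0;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pool_get_page(pool);

			if (!page) {
				*out_of_pages = 1;	/* pool ran dry */
				break;			/* partial buffer is fine */
			}
			buf_add_page(buf, page);
		}

		if (!i) {
			buf_free(buf);		/* got nothing at all */
			return NULL;
		}
		return buf;
	}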
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+					  struct bio *bio, sector_t sector)
+{
+	struct crypt_config *cc = ti->private;
+	struct dm_crypt_io *io;
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	io->target = ti;
+	io->base_bio = bio;
+	io->sector = sector;
+	io->error = 0;
+	atomic_set(&io->pending, 0);
+
+	return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
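crypt_io_alloc() gathers the previously open-coded io initialization into one place, and crypt_inc_pending() gives the io reference count a named take-a-reference operation to pair with the existing crypt_dec_pending(). For orientation, the release side (not part of this diff) has roughly this shape: the last reference completes the base bio and returns the io to its mempool:

	/* Rough shape of the existing counterpart; not in this diff. */
	static void crypt_dec_pending(struct dm_crypt_io *io)
	{
		struct crypt_config *cc = io->target->private;

		if (!atomic_dec_and_test(&io->pending))
			return;		/* other references still in flight */

		mempool_free(io, cc->io_pool);
		bio_endio(io->base_bio, io->error);
	}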
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		io->error = -EIO;
+		crypt_dec_pending(io);
 		return;
 	}
 
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
 	if (async)
 		kcryptd_queue_io(io);
-	else {
-		atomic_inc(&io->pending);
+	else
 		generic_make_request(clone);
-	}
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
 	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	crypt_inc_pending(io);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
+	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
-			return;
+			break;
 		}
 
 		io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
 		remaining -= clone->bi_size;
 
+		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
+		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-		if (atomic_dec_and_test(&io->ctx.pending)) {
-			/* processed, no running async crypto */
+		/* Encryption was already finished, submit io now */
+		if (crypt_finished) {
 			kcryptd_crypt_write_io_submit(io, r, 0);
-			if (unlikely(r < 0))
-				return;
-		} else
-			atomic_inc(&io->pending);
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			/* wait for async crypto then reinitialize pending */
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-			atomic_set(&io->ctx.pending, 1);
-			congestion_wait(WRITE, HZ/100);
+			/*
+			 * If there was an error, do not try next fragments.
+			 * For async, error is processed in async handler.
+			 */
+			if (unlikely(r < 0))
+				break;
 		}
-	}
-}
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	atomic_inc(&io->pending);
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
+			congestion_wait(WRITE, HZ/100);
 
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-	kcryptd_crypt_write_convert_loop(io);
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+	}
 
 	crypt_dec_pending(io);
 }
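The reworked write path above folds kcryptd_crypt_write_convert_loop() into kcryptd_crypt_write_convert() and tightens the accounting: the function holds its own io reference for its whole lifetime, each fragment takes a further reference before crypt_convert(), and atomic_dec_and_test() on ctx.pending distinguishes the synchronous case (submit the fragment here) from the asynchronous one (the completion handler submits it and drops the reference). Errors now break out of the loop instead of returning, so the final crypt_dec_pending() always runs, and congestion_wait() is taken only when the split was caused by page exhaustion. A sketch of the per-fragment decision, with the hypothetical submit_fragment() standing in for kcryptd_crypt_write_io_submit():

	/* Sketch only: isolates the sync/async submit decision. */
	static int handle_one_fragment(struct crypt_config *cc,
				       struct dm_crypt_io *io)
	{
		int r;

		crypt_inc_pending(io);		/* fragment's io reference */
		r = crypt_convert(cc, &io->ctx);

		if (atomic_dec_and_test(&io->ctx.pending)) {
			/* all crypto finished synchronously: submit here */
			submit_fragment(io, r);
			return r;		/* caller breaks on r < 0 */
		}

		/* async crypto still running: its completion handler will
		 * submit the fragment and drop the io reference */
		return 0;
	}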
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
-	io->base_bio = bio;
-	io->sector = bio->bi_sector - ti->begin;
-	io->error = 0;
-	atomic_set(&io->pending, 0);
+	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
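With crypt_io_alloc() in place, crypt_map() reduces to allocation plus queueing. An illustrative fragment of the helper used end to end; the write branch and return value below follow the surrounding dm-crypt code of this era and are an assumption, not part of this diff:

	/* Illustrative only; mirrors crypt_map() above. */
	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct dm_crypt_io *io;

		io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

		if (bio_data_dir(bio) == READ)
			kcryptd_queue_io(io);
		else
			kcryptd_queue_crypt(io);	/* assumed write path */

		return DM_MAPIO_SUBMITTED;
	}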