Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d3c48ad580d9..4df7d2f782d8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -577,18 +577,34 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
 
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
 {
+	struct bio *clone = io->ctx.bio_out;
+	struct crypt_config *cc = io->target->private;
+
+	if (unlikely(error < 0)) {
+		crypt_free_buffer_pages(cc, clone);
+		bio_put(clone);
+		io->error = -EIO;
+		crypt_dec_pending(io);
+		return;
+	}
+
+	/* crypt_convert should have filled the clone bio */
+	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+
+	clone->bi_sector = cc->start + io->sector;
+	io->sector += bio_sectors(clone);
 }
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
-	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
-	unsigned remaining = base_bio->bi_size;
+	unsigned remaining = io->base_bio->bi_size;
+	int r;
 
 	atomic_inc(&io->pending);
 
-	crypt_convert_init(cc, &io->ctx, NULL, base_bio, io->sector);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
 
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
@@ -605,20 +621,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
-		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
-			crypt_free_buffer_pages(cc, clone);
-			bio_put(clone);
-			io->error = -EIO;
-			crypt_dec_pending(io);
-			return;
-		}
+		remaining -= clone->bi_size;
 
-		/* crypt_convert should have filled the clone bio */
-		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+		r = crypt_convert(cc, &io->ctx);
 
-		clone->bi_sector = cc->start + io->sector;
-		remaining -= clone->bi_size;
-		io->sector += bio_sectors(clone);
+		kcryptd_crypt_write_io_submit(io, r);
+		if (unlikely(r < 0))
+			return;
 
 		/* Grab another reference to the io struct
 		 * before we kick off the request */
@@ -631,7 +640,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		 * may be gone already. */
 
 		/* out of memory -> run queues */
-		if (remaining)
+		if (unlikely(remaining))
 			congestion_wait(WRITE, HZ/100);
 	}
 }
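
Put together, the patch moves the write-path error handling and clone submission setup out of the conversion loop and into kcryptd_crypt_write_io_submit(). The following is a reconstruction assembled from the added lines above, not the full file; the surrounding loop body and declarations that the hunks do not show are elided.

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		/* Conversion failed: drop the clone and complete the io with -EIO. */
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);
}

	/* Inside the write loop of kcryptd_crypt_write_convert(): */
	remaining -= clone->bi_size;

	r = crypt_convert(cc, &io->ctx);

	kcryptd_crypt_write_io_submit(io, r);
	if (unlikely(r < 0))
		return;

The cleanup that used to sit inline in the loop (crypt_free_buffer_pages(), bio_put(), setting io->error and dropping the pending reference) now lives in one helper together with the sector bookkeeping for the clone, so the loop itself only has to check the return value of crypt_convert().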