aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorMilan Broz <mbroz@redhat.com>2008-10-10 08:37:08 -0400
committerAlasdair G Kergon <agk@redhat.com>2008-10-10 08:37:08 -0400
commit933f01d43326fb12a978a8e0bb062c28a2de4d5a (patch)
tree89f25c15fb98d90ee7377437482e444a8ac6a106 /drivers
parentc8081618a9f832fdf7ca81eb087f9f61f2bf07d5 (diff)
dm crypt: avoid unnecessary wait when splitting bio
Don't wait between submitting crypt requests for a bio unless we are short of memory.

There are two situations when we must split an encrypted bio:
  1) there are no free pages;
  2) the new bio would violate underlying device restrictions
     (e.g. max hw segments).

In case (2) we do not need to wait. Add an output variable to
crypt_alloc_buffer() to distinguish between these cases.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/dm-crypt.c26
1 file changed, 18 insertions, 8 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f6018f5961de..682ef9e6acd3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -457,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -473,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;

 	clone_init(io, clone);
+	*out_of_pages = 0;

 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}

 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -696,6 +701,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;

@@ -710,7 +716,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
 			break;
@@ -737,11 +743,15 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			break;
 		}

-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
-		}
+
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}

 	crypt_dec_pending(io);