author		Olaf Kirch <olaf.kirch@oracle.com>	2007-05-09 05:32:54 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:46 -0400
commit		f97380bcadd6bd2e368727de4061aaba4989c426 (patch)
tree		179db9e733545aa1e796a22f670fc86963059492 /drivers/md
parent		2f9941b6c55d70103c1bc3f2c7676acd9f20bf8a (diff)
dm crypt: use smaller bvecs in clones
Allocate smaller clones

With the previous dm-crypt fixes, there is no need for the clone bios to have
the same bvec size as the original - we just need to make them big enough for
the remaining number of pages.

The only requirement is that we clear the "out" index in convert_context, so
that crypt_convert starts storing data at the right position within the clone
bio.

Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
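In practice this means the write path can size each clone to whatever data is
still outstanding and simply reset the output index before converting into it.
A minimal sketch of that caller pattern, condensed from the process_write()
changes in the diff below (error handling and unrelated bookkeeping omitted,
so treat it as an illustration rather than the full function):

	while (remaining) {
		/* big enough for what is left, not for base_bio->bi_size */
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;	/* start filling this clone from bvec 0 */

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone, clone->bi_size);
			/* ... remaining error handling as in the function ... */
			return;
		}

		/* submit the clone, then account for the data it carries */
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);
	}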
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	29
1 file changed, 8 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 339b575ce07f..1ecee5e1c548 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -379,8 +379,7 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
-				      unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -394,16 +393,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 
 	clone_init(io, clone);
 
-	/* if the last bio was not complete, continue where that one ended */
-	clone->bi_idx = *bio_vec_idx;
-	clone->bi_vcnt = *bio_vec_idx;
-	clone->bi_size = 0;
-	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-	/* clone->bi_idx pages have already been allocated */
-	size -= clone->bi_idx * PAGE_SIZE;
-
-	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+	for (i = 0; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -415,7 +405,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -434,12 +424,6 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		return NULL;
 	}
 
-	/*
-	 * Remember the last bio_vec allocated to be able
-	 * to correctly continue after the splitting.
-	 */
-	*bio_vec_idx = clone->bi_vcnt;
-
 	return clone;
 }
 
@@ -597,7 +581,6 @@ static void process_write(struct crypt_io *io)
 	struct convert_context ctx;
 	unsigned remaining = base_bio->bi_size;
 	sector_t sector = base_bio->bi_sector - io->target->begin;
-	unsigned bvec_idx = 0;
 
 	atomic_inc(&io->pending);
 
@@ -608,13 +591,14 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
 			return;
 		}
 
 		ctx.bio_out = clone;
+		ctx.idx_out = 0;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -623,6 +607,9 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
+
 		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
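The new BUG_ON documents the invariant this patch relies on: after
crypt_convert() returns successfully, the output index must have advanced past
every bvec in the clone, i.e. the whole clone was filled. For reference, a
rough sketch of the conversion context involved; the field list is
reconstructed from this era of dm-crypt.c rather than quoted from the patch,
so treat it as illustrative:

	struct convert_context {
		struct bio *bio_in;
		struct bio *bio_out;
		unsigned int offset_in;
		unsigned int offset_out;
		unsigned int idx_in;	/* current bvec in the source bio */
		unsigned int idx_out;	/* current bvec in the clone - reset to 0 per clone */
		sector_t sector;
		int write;
	};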