commit 2f9941b6c55d70103c1bc3f2c7676acd9f20bf8a
author:    Olaf Kirch <olaf.kirch@oracle.com>      2007-05-09 05:32:53 -0400
committer: Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 15:30:46 -0400
tree:      523af38a7f1d7f1f875ca43a8c2998a29026dd70
parent:    98221eb757de03d9aa6262b1eded2be708640ccc
dm crypt: fix remove first_clone
Get rid of first_clone in dm-crypt
This gets rid of first_clone, which is not really needed. Apparently, cloned
bios used to share their bvec some time way in the past - this is no longer
the case. Contrarily, this even hurts us if we try to create a clone off
first_clone after it has completed, and crypt_endio has destroyed its bvec.
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 drivers/md/dm-crypt.c | 34 ++++++------------------------------
 1 file changed, 6 insertions(+), 28 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1dc2c62200ee..339b575ce07f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
 struct crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
-	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
 	int error;
@@ -380,9 +379,8 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *
-crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
-		   struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
+				      unsigned int *bio_vec_idx)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -390,12 +388,7 @@ crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;

-	if (base_bio) {
-		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
-		__bio_clone(clone, base_bio);
-	} else
-		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;

@@ -498,9 +491,6 @@ static void dec_pending(struct crypt_io *io, int error)
 	if (!atomic_dec_and_test(&io->pending))
 		return;

-	if (io->first_clone)
-		bio_put(io->first_clone);
-
 	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);

 	mempool_free(io, cc->io_pool);
@@ -618,8 +608,7 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, base_bio->bi_size,
-					   io->first_clone, &bvec_idx);
+		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
@@ -635,21 +624,11 @@ static void process_write(struct crypt_io *io)
 		}

 		clone->bi_sector = cc->start + sector;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
-
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);

-		/* prevent bio_put of first_clone */
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
 		if (remaining)
 			atomic_inc(&io->pending);

@@ -965,7 +944,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->first_clone = NULL;
 	io->error = io->post_process = 0;
 	atomic_set(&io->pending, 0);
 	kcryptd_queue_io(io);