author     Linus Torvalds <torvalds@linux-foundation.org>  2009-03-17 11:59:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-03-17 11:59:33 -0400
commit     18439c39e826191c0ef08c3a3271ce7ece46a860 (patch)
tree       71594d3d002c2bb65014c21001ee5a57aac8c76a /drivers
parent     9e8912e04e612b43897b4b722205408b92f423e5 (diff)
parent     b35f8caa0890169000fec22902290d9a15274cbd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm crypt: wait for endio to complete before destruction
  dm crypt: fix kcryptd_async_done parameter
  dm io: respect BIO_MAX_PAGES limit
  dm table: rework reference counting fix
  dm ioctl: validate name length when renaming
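The dm-crypt change below switches the data handed to the async completion callback from the convert_context to the per-request dm_crypt_request, and adds dmreq_of_req()/req_of_dmreq() helpers that convert between the cipher request and the private data stored at a fixed offset behind it. A minimal standalone sketch of that offset-based conversion pattern follows; all type and variable names in it are illustrative stand-ins, not the kernel's:

/*
 * Sketch (not kernel code): the per-request private struct lives at a fixed
 * offset (cc->dmreq_start in dm-crypt) past the cipher request, so either
 * pointer can be recovered from the other with plain pointer arithmetic.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_cipher_req {            /* stands in for struct ablkcipher_request */
	void *backend_data;
};

struct fake_dm_crypt_request {      /* stands in for struct dm_crypt_request */
	void *ctx;
};

static const size_t dmreq_start = sizeof(struct fake_cipher_req);

static struct fake_dm_crypt_request *dmreq_of_req(struct fake_cipher_req *req)
{
	return (struct fake_dm_crypt_request *)((char *)req + dmreq_start);
}

static struct fake_cipher_req *req_of_dmreq(struct fake_dm_crypt_request *dmreq)
{
	return (struct fake_cipher_req *)((char *)dmreq - dmreq_start);
}

int main(void)
{
	/* One allocation holds the cipher request followed by the private
	 * data, mirroring how dm-crypt sizes its request mempool. */
	struct fake_cipher_req *req =
		malloc(dmreq_start + sizeof(struct fake_dm_crypt_request));
	struct fake_dm_crypt_request *dmreq = dmreq_of_req(req);

	dmreq->ctx = req;   /* the real code stores the convert_context here */
	printf("round trip ok: %d\n", req_of_dmreq(dmreq) == req);

	free(req);
	return 0;
}

This is the mechanism that lets kcryptd_async_done() recover both the context (via the new ctx member) and the original request to free (via req_of_dmreq()) from the single dm_crypt_request pointer it now receives.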
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/md/dm-crypt.c | 43
 -rw-r--r--  drivers/md/dm-io.c    |  2
 -rw-r--r--  drivers/md/dm-ioctl.c |  7
 -rw-r--r--  drivers/md/dm.c       | 32
 4 files changed, 56 insertions, 28 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 35bda49796fb..bfefd079a955 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -60,6 +60,7 @@ struct dm_crypt_io {
 };
 
 struct dm_crypt_request {
+	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
 };
@@ -335,6 +336,18 @@ static void crypt_convert_init(struct crypt_config *cc,
 	init_completion(&ctx->restart);
 }
 
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+					     struct ablkcipher_request *req)
+{
+	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+					       struct dm_crypt_request *dmreq)
+{
+	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -345,10 +358,11 @@ static int crypt_convert_block(struct crypt_config *cc,
 	u8 *iv;
 	int r = 0;
 
-	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	dmreq = dmreq_of_req(cc, req);
 	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
+	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
 		    bv_in->bv_offset + ctx->offset_in);
@@ -395,8 +409,9 @@ static void crypt_alloc_req(struct crypt_config *cc,
 	cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 	ablkcipher_request_set_tfm(cc->req, cc->tfm);
 	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done, ctx);
+					kcryptd_async_done,
+					dmreq_of_req(cc, cc->req));
 }
 
 /*
@@ -553,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct dm_crypt_io *base_io = io->base_io;
+	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (likely(!io->base_io))
-		bio_endio(io->base_bio, io->error);
+	mempool_free(io, cc->io_pool);
+
+	if (likely(!base_io))
+		bio_endio(base_bio, error);
 	else {
-		if (io->error && !io->base_io->error)
-			io->base_io->error = io->error;
-		crypt_dec_pending(io->base_io);
+		if (error && !base_io->error)
+			base_io->error = error;
+		crypt_dec_pending(base_io);
 	}
-
-	mempool_free(io, cc->io_pool);
 }
 
 /*
@@ -821,7 +839,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error)
 {
-	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_request *dmreq = async_req->data;
+	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->target->private;
 
@@ -830,7 +849,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
-	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
 		return;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f14813be4eff..36e2b5e46a6b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -292,6 +292,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 						      (PAGE_SIZE >> SECTOR_SHIFT));
 		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
 				      num_bvecs);
+		if (unlikely(num_bvecs > BIO_MAX_PAGES))
+			num_bvecs = BIO_MAX_PAGES;
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 54d0588fc1f6..f01096549a93 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -704,7 +704,8 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 	char *new_name = (char *) param + param->data_start;
 
 	if (new_name < param->data ||
-	    invalid_str(new_name, (void *) param + param_size)) {
+	    invalid_str(new_name, (void *) param + param_size) ||
+	    strlen(new_name) > DM_NAME_LEN - 1) {
 		DMWARN("Invalid new logical volume name supplied.");
 		return -EINVAL;
 	}
@@ -1063,7 +1064,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 
 	r = populate_table(t, param, param_size);
 	if (r) {
-		dm_table_put(t);
+		dm_table_destroy(t);
 		goto out;
 	}
 
@@ -1071,7 +1072,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
 		DMWARN("device has been removed from the dev hash table.");
-		dm_table_put(t);
+		dm_table_destroy(t);
 		up_write(&_hash_lock);
 		r = -ENXIO;
 		goto out;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 51ba1db4b3e7..8d40f27cce89 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
 static void dec_pending(struct dm_io *io, int error)
 {
 	unsigned long flags;
+	int io_error;
+	struct bio *bio;
+	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
+	if (error && !(io->error > 0 && __noflush_suspending(md)))
 		io->error = error;
 
 	if (atomic_dec_and_test(&io->io_count)) {
@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
 		 * This must be handled before the sleeper on
 		 * suspend queue merges the pushback list.
 		 */
-		spin_lock_irqsave(&io->md->pushback_lock, flags);
-		if (__noflush_suspending(io->md))
-			bio_list_add(&io->md->pushback, io->bio);
+		spin_lock_irqsave(&md->pushback_lock, flags);
+		if (__noflush_suspending(md))
+			bio_list_add(&md->pushback, io->bio);
 		else
 			/* noflush suspend was interrupted. */
 			io->error = -EIO;
-		spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+		spin_unlock_irqrestore(&md->pushback_lock, flags);
 		}
 
 		end_io_acct(io);
 
-		if (io->error != DM_ENDIO_REQUEUE) {
-			trace_block_bio_complete(io->md->queue, io->bio);
-
-			bio_endio(io->bio, io->error);
-		}
-
-		free_io(io->md, io);
+		io_error = io->error;
+		bio = io->bio;
+
+		free_io(md, io);
+
+		if (io_error != DM_ENDIO_REQUEUE) {
+			trace_block_bio_complete(md->queue, bio);
+
+			bio_endio(bio, io_error);
+		}
 	}
 }
 
@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
 	struct dm_target_io *tio = bio->bi_private;
+	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	dec_pending(tio->io, error);
-
 	/*
 	 * Store md for cleanup instead of tio which is about to get freed.
 	 */
 	bio->bi_private = md->bs;
 
-	bio_put(bio);
 	free_tio(md, tio);
+	bio_put(bio);
+	dec_pending(io, error);
 }
 
 static sector_t max_io_len(struct mapped_device *md,