author		Milan Broz <mbroz@redhat.com>	2009-03-16 13:44:36 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-03-16 13:44:36 -0400
commit		b35f8caa0890169000fec22902290d9a15274cbd
tree		a1a8ad3e5ba8b36da631d7125e0deb4ae743955a /drivers/md
parent		b2174eebd1fadb76454dad09a1dacbc17081e6b0
dm crypt: wait for endio to complete before destruction
The following oops has been reported when dm-crypt runs over a loop device.

...
[ 70.381058] Process loop0 (pid: 4268, ti=cf3b2000 task=cf1cc1f0 task.ti=cf3b2000)
...
[ 70.381058] Call Trace:
[ 70.381058]  [<d0d76601>] ? crypt_dec_pending+0x5e/0x62 [dm_crypt]
[ 70.381058]  [<d0d767b8>] ? crypt_endio+0xa2/0xaa [dm_crypt]
[ 70.381058]  [<d0d76716>] ? crypt_endio+0x0/0xaa [dm_crypt]
[ 70.381058]  [<c01a2f24>] ? bio_endio+0x2b/0x2e
[ 70.381058]  [<d0806530>] ? dec_pending+0x224/0x23b [dm_mod]
[ 70.381058]  [<d08066e4>] ? clone_endio+0x79/0xa4 [dm_mod]
[ 70.381058]  [<d080666b>] ? clone_endio+0x0/0xa4 [dm_mod]
[ 70.381058]  [<c01a2f24>] ? bio_endio+0x2b/0x2e
[ 70.381058]  [<c02bad86>] ? loop_thread+0x380/0x3b7
[ 70.381058]  [<c02ba8a1>] ? do_lo_send_aops+0x0/0x165
[ 70.381058]  [<c013754f>] ? autoremove_wake_function+0x0/0x33
[ 70.381058]  [<c02baa06>] ? loop_thread+0x0/0x3b7

When a table is being replaced, it waits for I/O to complete before
destroying the mempool, but the endio function doesn't call
mempool_free() until after completing the bio.

Fix it by swapping the order of those two operations.

The same problem occurs in dm.c with md referenced after dec_pending.
Again, we swap the order.

Cc: stable@kernel.org
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
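The ordering rule the patch enforces is easier to see in isolation. Below is a minimal userspace sketch, not the kernel code: struct pool, struct request, request_done() and pool_free() are invented stand-ins for the dm structures and helpers. The point is the one made in the message above: copy whatever you still need out of the per-I/O object into locals, free the object back to its pool, and only then signal completion, because the thread woken by the completion may immediately destroy the pool.

/* Hypothetical userspace sketch of the completion ordering fixed by this
 * patch. None of these names are kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct pool {                        /* stand-in for the dm mempool */
	int outstanding;
};

struct request {                     /* stand-in for dm_crypt_io / dm_io */
	struct pool *pool;
	int error;
	void (*complete)(int error);     /* stand-in for bio_endio() */
};

static void pool_free(struct pool *p, struct request *rq)
{
	free(rq);
	p->outstanding--;                /* teardown waits for this to reach 0 */
}

static void request_done(struct request *rq)
{
	/* Copy everything still needed out of *rq ... */
	void (*complete)(int) = rq->complete;
	int error = rq->error;

	/* ... return it to the pool first ... */
	pool_free(rq->pool, rq);

	/* ... and only then signal completion.  Doing these two steps in the
	 * opposite order is the bug: the completion may wake a thread that
	 * destroys the pool while rq has not yet been freed into it. */
	complete(error);
}

static void report(int error)
{
	printf("request completed, error=%d\n", error);
}

int main(void)
{
	struct pool p = { .outstanding = 1 };
	struct request *rq = malloc(sizeof(*rq));

	rq->pool = &p;
	rq->error = 0;
	rq->complete = report;
	request_done(rq);

	printf("outstanding after completion: %d\n", p.outstanding);
	return 0;
}

The hunks below follow the same shape: crypt_dec_pending() caches base_bio, base_io and error and calls mempool_free() before bio_endio(), and dec_pending()/clone_endio() cache bio, io_error and md and call free_io()/free_tio() before completing the bio.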
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	17
-rw-r--r--	drivers/md/dm.c	32
2 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ebab49f8cc1d..bfefd079a955 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -568,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct dm_crypt_io *base_io = io->base_io;
+	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (likely(!io->base_io))
-		bio_endio(io->base_bio, io->error);
+	mempool_free(io, cc->io_pool);
+
+	if (likely(!base_io))
+		bio_endio(base_bio, error);
 	else {
-		if (io->error && !io->base_io->error)
-			io->base_io->error = io->error;
-		crypt_dec_pending(io->base_io);
+		if (error && !base_io->error)
+			base_io->error = error;
+		crypt_dec_pending(base_io);
 	}
-
-	mempool_free(io, cc->io_pool);
 }
 
 /*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 51ba1db4b3e7..8d40f27cce89 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
 static void dec_pending(struct dm_io *io, int error)
 {
 	unsigned long flags;
+	int io_error;
+	struct bio *bio;
+	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
+	if (error && !(io->error > 0 && __noflush_suspending(md)))
 		io->error = error;
 
 	if (atomic_dec_and_test(&io->io_count)) {
@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
 			 * This must be handled before the sleeper on
 			 * suspend queue merges the pushback list.
 			 */
-			spin_lock_irqsave(&io->md->pushback_lock, flags);
-			if (__noflush_suspending(io->md))
-				bio_list_add(&io->md->pushback, io->bio);
+			spin_lock_irqsave(&md->pushback_lock, flags);
+			if (__noflush_suspending(md))
+				bio_list_add(&md->pushback, io->bio);
 			else
 				/* noflush suspend was interrupted. */
 				io->error = -EIO;
-			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+			spin_unlock_irqrestore(&md->pushback_lock, flags);
 		}
 
 		end_io_acct(io);
 
-		if (io->error != DM_ENDIO_REQUEUE) {
-			trace_block_bio_complete(io->md->queue, io->bio);
+		io_error = io->error;
+		bio = io->bio;
 
-			bio_endio(io->bio, io->error);
-		}
+		free_io(md, io);
+
+		if (io_error != DM_ENDIO_REQUEUE) {
+			trace_block_bio_complete(md->queue, bio);
 
-		free_io(io->md, io);
+			bio_endio(bio, io_error);
+		}
 	}
 }
 
@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
 	struct dm_target_io *tio = bio->bi_private;
+	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	dec_pending(tio->io, error);
-
 	/*
 	 * Store md for cleanup instead of tio which is about to get freed.
 	 */
 	bio->bi_private = md->bs;
 
-	bio_put(bio);
 	free_tio(md, tio);
+	bio_put(bio);
+	dec_pending(io, error);
 }
 
 static sector_t max_io_len(struct mapped_device *md,