author     Joe Thornber <ejt@redhat.com>        2014-10-10 10:27:16 -0400
committer  Mike Snitzer <snitzer@redhat.com>    2014-11-10 15:25:28 -0500
commit     2d759a46b4d65e1392843cf9df7101897af87008 (patch)
tree       7cca57c04d5d8e29ca7ae232fa52f2e8fa6883c3 /drivers/md/dm-thin.c
parent     a374bb217b449a00eb96d0584bb833a8b62b672a (diff)
dm thin: remap the bios in a cell immediately
This use of direct submission in process_prepared_mapping() reduces
latency for submitting bios in a cell by avoiding adding those bios to
the deferred list and waiting for the next iteration of the worker.
But this direct submission exposes the potential for a race between
releasing a cell and incrementing the deferred set. Fix this by introducing
dm_cell_visit_release() and refactoring inc_remap_and_issue_cell()
accordingly.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
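
To make the ordering concrete, here is a minimal userspace sketch of the visit-and-release idea, under stated assumptions: struct prison, struct cell, visit_cell and the pthread mutex below are simplified stand-ins for the bio prison, its lock and __inc_remap_and_issue_cell(), not the kernel API. The point it illustrates is that the visiting callback walks the cell's bios while the lock is still held, and only then is the cell torn down, so there is no window in which the cell has been released but its bios have not yet been accounted for (in dm-thin, had their deferred-set entries incremented). As in the patch, the callback only sorts bios into defer/issue buckets; nothing is submitted under the lock, and the actual deferral and remap_and_issue() happen after the call returns.

/*
 * Minimal userspace sketch of the visit-and-release pattern (hypothetical
 * stand-in types: "prison", "cell", "bio" and a pthread mutex in place of
 * the bio prison and its spinlock; this is not the kernel API).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bio {
        struct bio *next;
        unsigned int flush:1;   /* stand-in for REQ_FLUSH/REQ_FUA/REQ_DISCARD */
};

struct cell {                   /* stand-in for dm_bio_prison_cell */
        struct bio *bios;       /* bios that piled up behind the holder */
};

struct prison {                 /* stand-in for dm_bio_prison */
        pthread_mutex_t lock;
};

/*
 * Analogue of cell_visit_release()/dm_cell_visit_release(): the callback
 * sees the cell's bios while the lock is still held, and only afterwards
 * is the cell freed, so visiting and releasing form one atomic step.
 */
static void cell_visit_release(struct prison *prison,
                               void (*fn)(void *, struct cell *),
                               void *context, struct cell *cell)
{
        pthread_mutex_lock(&prison->lock);
        fn(context, cell);
        pthread_mutex_unlock(&prison->lock);
        free(cell);
}

/*
 * In the spirit of __inc_remap_and_issue_cell(): sort bios into "defer"
 * and "issue" buckets; nothing is submitted while the lock is held.
 */
struct remap_info {
        int to_defer;
        int to_issue;
};

static void visit_cell(void *context, struct cell *cell)
{
        struct remap_info *info = context;
        struct bio *bio;

        for (bio = cell->bios; bio; bio = bio->next) {
                if (bio->flush)
                        info->to_defer++;   /* dm-thin defers these to the worker */
                else
                        info->to_issue++;   /* dm-thin incs the io entry, issues later */
        }
}

int main(void)
{
        struct prison prison;
        struct cell *cell = calloc(1, sizeof(*cell));
        struct bio b1 = { .next = NULL, .flush = 1 };
        struct bio b0 = { .next = &b1, .flush = 0 };
        struct remap_info info = { 0, 0 };

        pthread_mutex_init(&prison.lock, NULL);
        cell->bios = &b0;

        /* Visit (account for) the bios and release the cell in one step. */
        cell_visit_release(&prison, visit_cell, &info, cell);
        printf("to_defer=%d to_issue=%d\n", info.to_defer, info.to_issue);
        return 0;
}
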
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  |  90
1 file changed, 61 insertions(+), 29 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 912d7f4d89d1..5036d4b3f368 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -343,6 +343,15 @@ static void cell_release(struct pool *pool,
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
+static void cell_visit_release(struct pool *pool,
+			       void (*fn)(void *, struct dm_bio_prison_cell *),
+			       void *context,
+			       struct dm_bio_prison_cell *cell)
+{
+	dm_cell_visit_release(pool->prison, fn, context, cell);
+	dm_bio_prison_free_cell(pool->prison, cell);
+}
+
 static void cell_release_no_holder(struct pool *pool,
 				   struct dm_bio_prison_cell *cell,
 				   struct bio_list *bios)
@@ -697,55 +706,75 @@ static void overwrite_endio(struct bio *bio, int err)
  */
 
 /*
- * This sends the bios in the cell back to the deferred_bios list.
+ * This sends the bios in the cell, except the original holder, back
+ * to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tc->lock, flags);
-	cell_release(pool, cell, &tc->deferred_bio_list);
+	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
 	spin_unlock_irqrestore(&tc->lock, flags);
 
 	wake_worker(pool);
 }
 
-/*
- * Same as cell_defer above, except it omits the original holder of the cell.
- */
-static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
+
+struct remap_info {
+	struct thin_c *tc;
+	struct bio_list defer_bios;
+	struct bio_list issue_bios;
+};
+
+static void __inc_remap_and_issue_cell(void *context,
+				       struct dm_bio_prison_cell *cell)
 {
-	struct pool *pool = tc->pool;
-	unsigned long flags;
+	struct remap_info *info = context;
+	struct bio *bio;
 
-	spin_lock_irqsave(&tc->lock, flags);
-	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	while ((bio = bio_list_pop(&cell->bios))) {
+		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+			bio_list_add(&info->defer_bios, bio);
+		else {
+			inc_all_io_entry(info->tc->pool, bio);
 
-	wake_worker(pool);
+			/*
+			 * We can't issue the bios with the bio prison lock
+			 * held, so we add them to a list to issue on
+			 * return from this function.
+			 */
+			bio_list_add(&info->issue_bios, bio);
+		}
+	}
 }
 
-static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
-
 static void inc_remap_and_issue_cell(struct thin_c *tc,
 				     struct dm_bio_prison_cell *cell,
 				     dm_block_t block)
 {
 	struct bio *bio;
-	struct bio_list bios;
+	struct remap_info info;
 
-	bio_list_init(&bios);
-	cell_release_no_holder(tc->pool, cell, &bios);
+	info.tc = tc;
+	bio_list_init(&info.defer_bios);
+	bio_list_init(&info.issue_bios);
 
-	while ((bio = bio_list_pop(&bios))) {
-		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
-			thin_defer_bio(tc, bio);
-		else {
-			inc_all_io_entry(tc->pool, bio);
-			remap_and_issue(tc, bio, block);
-		}
-	}
+	/*
+	 * We have to be careful to inc any bios we're about to issue
+	 * before the cell is released, and avoid a race with new bios
+	 * being added to the cell.
+	 */
+	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
+			   &info, cell);
+
+	while ((bio = bio_list_pop(&info.defer_bios)))
+		thin_defer_bio(tc, bio);
+
+	while ((bio = bio_list_pop(&info.issue_bios)))
+		remap_and_issue(info.tc, bio, block);
 }
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
@@ -796,10 +825,13 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 * the bios in the cell.
 	 */
 	if (bio) {
-		cell_defer_no_holder(tc, m->cell);
+		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
 		bio_endio(bio, 0);
-	} else
-		cell_defer(tc, m->cell);
+	} else {
+		inc_all_io_entry(tc->pool, m->cell->holder);
+		remap_and_issue(tc, m->cell->holder, m->data_block);
+		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
+	}
 
 out:
 	list_del(&m->list);