path: root/drivers/md
author	Joe Thornber <ejt@redhat.com>	2014-06-13 08:57:09 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2014-08-01 12:30:31 -0400
commit	50f3c3efdd5773d90396be07a7ecaa58227ff906 (patch)
tree	a0d6222e44377d2283207d6da8de1d0e3467465d /drivers/md
parent	6afbc01d75c0266d7da32e2ec1f54d579a35703d (diff)
dm thin: switch to an atomic_t for tracking pending new block preparations
Previously we used separate boolean values to track quiescing and
copying actions. By switching to an atomic_t we can support blocks that
need a partial copy and partial zero.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
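In short, the two booleans become a count of outstanding preparation steps: schedule_copy() and schedule_zero() seed prepare_actions with the number of steps still pending, each completion path calls __complete_mapping_preparation(), and the step that drops the counter to zero queues the mapping for insertion. Below is a minimal standalone sketch of that counting pattern; the names (new_mapping, complete_preparation, ready) are hypothetical and C11 stdatomic stands in for the kernel's atomic_t and pool spinlock.

/* Hypothetical, self-contained illustration; not the kernel code itself. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct new_mapping {
	atomic_int prepare_actions;	/* outstanding preparation steps */
	bool ready;			/* stands in for pool->prepared_mappings */
};

/* Mirrors __complete_mapping_preparation(): called once per finished step. */
static void complete_preparation(struct new_mapping *m)
{
	/* fetch_sub returns the old value, so 1 means this was the last step. */
	if (atomic_fetch_sub(&m->prepare_actions, 1) == 1)
		m->ready = true;	/* kernel: list_add_tail() + wake_worker() */
}

int main(void)
{
	struct new_mapping m = { .ready = false };

	/* Like schedule_copy() when quiescing is still needed: two steps. */
	atomic_init(&m.prepare_actions, 2);

	complete_preparation(&m);	/* quiesce finished; copy still pending */
	printf("after quiesce: ready=%d\n", m.ready);

	complete_preparation(&m);	/* copy finished; counter hits zero */
	printf("after copy:    ready=%d\n", m.ready);
	return 0;
}

A block that needs both a partial copy and a partial zero would simply seed the counter with one more step, which is the flexibility the two booleans could not express.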
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-thin.c	29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fc9c848a60c9..4c9a3b5f4ff1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -554,11 +554,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 struct dm_thin_new_mapping {
 	struct list_head list;
 
-	bool quiesced:1;
-	bool prepared:1;
 	bool pass_discard:1;
 	bool definitely_not_shared:1;
 
+	/*
+	 * Track quiescing, copying and zeroing preparation actions.  When this
+	 * counter hits zero the block is prepared and can be inserted into the
+	 * btree.
+	 */
+	atomic_t prepare_actions;
+
 	int err;
 	struct thin_c *tc;
 	dm_block_t virt_block;
@@ -575,11 +580,11 @@ struct dm_thin_new_mapping {
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
+static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
-	if (m->quiesced && m->prepared) {
+	if (atomic_dec_and_test(&m->prepare_actions)) {
 		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
@@ -594,8 +599,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	m->err = read_err || write_err ? -EIO : 0;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = true;
-	__maybe_add_mapping(m);
+	__complete_mapping_preparation(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
@@ -609,8 +613,7 @@ static void overwrite_endio(struct bio *bio, int err)
 	m->err = err;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = true;
-	__maybe_add_mapping(m);
+	__complete_mapping_preparation(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
@@ -836,7 +839,9 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->cell = cell;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-		m->quiesced = true;
+		atomic_set(&m->prepare_actions, 1); /* copy only */
+	else
+		atomic_set(&m->prepare_actions, 2); /* quiesce + copy */
 
 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -896,8 +901,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	m->quiesced = true;
-	m->prepared = false;
+	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
@@ -3361,8 +3365,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list) {
 			list_del(&m->list);
-			m->quiesced = true;
-			__maybe_add_mapping(m);
+			__complete_mapping_preparation(m);
 		}
 		spin_unlock_irqrestore(&pool->lock, flags);
 	}