author		Mike Snitzer <snitzer@redhat.com>	2014-11-07 15:09:46 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2014-11-19 11:25:36 -0500
commit		80e96c5484be788f277eead9cabf88cf8e430419
tree		25054660674523866c2410ac9f79fa28414afceb
parent		d67ee213fa5700c7da526fe5bcccd485cfa63d8b
dm thin: do not allow thin device activation while pool is suspended
Otherwise IO could be issued to the pool while it is suspended.

Care was taken to properly interlock between the thin and thin-pool
targets when accessing the pool's 'suspended' flag.  The thin_ctr will
not add a new thin device to the pool's active_thins list if the pool
is suspended.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
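The interlock described above reduces to one pattern: the 'suspended'
flag is only read or written under pool->lock, and thin_ctr tests it in
the same critical section that adds the new device to active_thins, so
activation can never race with a suspend. What follows is a minimal
userspace sketch of that pattern, not the kernel code itself: a pthread
mutex stands in for spin_lock_irqsave(), a counter stands in for the
active_thins list, and names such as thin_activate() are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's struct pool / struct thin_c. */
struct pool {
	pthread_mutex_t lock;	/* plays the role of pool->lock */
	bool suspended;
	int active_thins;	/* a count stands in for the list */
};

static void pool_presuspend(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	pool->suspended = true;
	pthread_mutex_unlock(&pool->lock);
}

static void pool_resume(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	pool->suspended = false;
	pthread_mutex_unlock(&pool->lock);
}

/*
 * Mirrors the thin_ctr change: the flag is tested and the device is
 * added to active_thins inside one critical section, so a concurrent
 * presuspend cannot slip in between the check and the list add.
 */
static int thin_activate(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	if (pool->suspended) {
		pthread_mutex_unlock(&pool->lock);
		return -1;	/* -EINVAL in the real thin_ctr */
	}
	pool->active_thins++;
	pthread_mutex_unlock(&pool->lock);
	return 0;
}

int main(void)
{
	/* Pools start suspended (mirroring pool_create), so the first
	 * activation fails and the one after pool_resume succeeds. */
	struct pool pool = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

	printf("activate while suspended: %d\n", thin_activate(&pool));
	pool_resume(&pool);
	printf("activate after resume:    %d\n", thin_activate(&pool));
	return 0;
}

Initializing the flag to true in pool_create closes the window between
pool creation and the first resume: a brand-new pool also refuses thin
activation until it has actually been resumed.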
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-thin.c	55
1 file changed, 45 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 64fd4de2986f..f1b53e31d868 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -224,6 +224,7 @@ struct pool {
 
 	struct pool_features pf;
 	bool low_water_triggered:1;	/* A dm event has been sent */
+	bool suspended:1;
 
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
@@ -2575,6 +2576,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->active_thins);
 	pool->low_water_triggered = false;
+	pool->suspended = true;
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3119,12 +3121,36 @@ static void pool_resume(struct dm_target *ti)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	pool->low_water_triggered = false;
+	pool->suspended = false;
 	spin_unlock_irqrestore(&pool->lock, flags);
+
 	requeue_bios(pool);
 
 	do_waker(&pool->waker.work);
 }
 
+static void pool_presuspend(struct dm_target *ti)
+{
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->suspended = true;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void pool_presuspend_undo(struct dm_target *ti)
+{
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->suspended = false;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void pool_postsuspend(struct dm_target *ti)
 {
 	struct pool_c *pt = ti->private;
@@ -3592,6 +3618,8 @@ static struct target_type pool_target = {
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
 	.map = pool_map,
+	.presuspend = pool_presuspend,
+	.presuspend_undo = pool_presuspend_undo,
 	.postsuspend = pool_postsuspend,
 	.preresume = pool_preresume,
 	.resume = pool_resume,
@@ -3721,18 +3749,18 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		ti->error = "Couldn't open thin device, Pool is in fail mode";
 		r = -EINVAL;
-		goto bad_thin_open;
+		goto bad_pool;
 	}
 
 	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
 	if (r) {
 		ti->error = "Couldn't open thin internal device";
-		goto bad_thin_open;
+		goto bad_pool;
 	}
 
 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
 	if (r)
-		goto bad_target_max_io_len;
+		goto bad;
 
 	ti->num_flush_bios = 1;
 	ti->flush_supported = true;
@@ -3747,14 +3775,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->split_discard_bios = true;
 	}
 
-	dm_put(pool_md);
-
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	atomic_set(&tc->refcount, 1);
-	init_completion(&tc->can_destroy);
-
 	spin_lock_irqsave(&tc->pool->lock, flags);
+	if (tc->pool->suspended) {
+		spin_unlock_irqrestore(&tc->pool->lock, flags);
+		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
+		ti->error = "Unable to activate thin device while pool is suspended";
+		r = -EINVAL;
+		goto bad;
+	}
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
 	/*
@@ -3765,11 +3795,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	 */
 	synchronize_rcu();
 
+	dm_put(pool_md);
+
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
+
 	return 0;
 
-bad_target_max_io_len:
+bad:
 	dm_pool_close_thin_device(tc->td);
-bad_thin_open:
+bad_pool:
 	__pool_dec(tc->pool);
 bad_pool_lookup:
 	dm_put(pool_md);