diff options
author | Mike Snitzer <snitzer@redhat.com> | 2014-10-28 20:58:45 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-11-19 12:34:08 -0500 |
commit | 583024d248f486e21479d1912aa2093565455770 (patch) | |
tree | 1bd3b82b4a6db8fff1fb6fa4aa58e5a6596263a0 /drivers/md | |
parent | ffcc39364160663cda1a3c358f4537302a92459b (diff) |
dm thin: suspend/resume active thin devices when reloading thin-pool
Before this change it was expected that userspace would first suspend
all active thin devices, reload/resize the thin-pool target, then resume
all active thin devices. Now the thin-pool suspend/resume will trigger
the suspend/resume of all active thins via appropriate calls to
dm_internal_suspend_noflush and dm_internal_resume.
Store the mapped_device for each thin device in struct thin_c to make
these calls possible.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-thin.c | 40 |
1 file changed, 38 insertions, 2 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index f1b53e31d868..e9e9584fe769 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -292,6 +292,8 @@ struct thin_c { | |||
292 | 292 | ||
293 | struct pool *pool; | 293 | struct pool *pool; |
294 | struct dm_thin_device *td; | 294 | struct dm_thin_device *td; |
295 | struct mapped_device *thin_md; | ||
296 | |||
295 | bool requeue_mode:1; | 297 | bool requeue_mode:1; |
296 | spinlock_t lock; | 298 | spinlock_t lock; |
297 | struct list_head deferred_cells; | 299 | struct list_head deferred_cells; |
@@ -3113,19 +3115,48 @@ static int pool_preresume(struct dm_target *ti) | |||
3113 | return 0; | 3115 | return 0; |
3114 | } | 3116 | } |
3115 | 3117 | ||
3118 | static void pool_suspend_active_thins(struct pool *pool) | ||
3119 | { | ||
3120 | struct thin_c *tc; | ||
3121 | |||
3122 | /* Suspend all active thin devices */ | ||
3123 | tc = get_first_thin(pool); | ||
3124 | while (tc) { | ||
3125 | dm_internal_suspend_noflush(tc->thin_md); | ||
3126 | tc = get_next_thin(pool, tc); | ||
3127 | } | ||
3128 | } | ||
3129 | |||
3130 | static void pool_resume_active_thins(struct pool *pool) | ||
3131 | { | ||
3132 | struct thin_c *tc; | ||
3133 | |||
3134 | /* Resume all active thin devices */ | ||
3135 | tc = get_first_thin(pool); | ||
3136 | while (tc) { | ||
3137 | dm_internal_resume(tc->thin_md); | ||
3138 | tc = get_next_thin(pool, tc); | ||
3139 | } | ||
3140 | } | ||
3141 | |||
3116 | static void pool_resume(struct dm_target *ti) | 3142 | static void pool_resume(struct dm_target *ti) |
3117 | { | 3143 | { |
3118 | struct pool_c *pt = ti->private; | 3144 | struct pool_c *pt = ti->private; |
3119 | struct pool *pool = pt->pool; | 3145 | struct pool *pool = pt->pool; |
3120 | unsigned long flags; | 3146 | unsigned long flags; |
3121 | 3147 | ||
3148 | /* | ||
3149 | * Must requeue active_thins' bios and then resume | ||
3150 | * active_thins _before_ clearing 'suspend' flag. | ||
3151 | */ | ||
3152 | requeue_bios(pool); | ||
3153 | pool_resume_active_thins(pool); | ||
3154 | |||
3122 | spin_lock_irqsave(&pool->lock, flags); | 3155 | spin_lock_irqsave(&pool->lock, flags); |
3123 | pool->low_water_triggered = false; | 3156 | pool->low_water_triggered = false; |
3124 | pool->suspended = false; | 3157 | pool->suspended = false; |
3125 | spin_unlock_irqrestore(&pool->lock, flags); | 3158 | spin_unlock_irqrestore(&pool->lock, flags); |
3126 | 3159 | ||
3127 | requeue_bios(pool); | ||
3128 | |||
3129 | do_waker(&pool->waker.work); | 3160 | do_waker(&pool->waker.work); |
3130 | } | 3161 | } |
3131 | 3162 | ||
@@ -3138,6 +3169,8 @@ static void pool_presuspend(struct dm_target *ti) | |||
3138 | spin_lock_irqsave(&pool->lock, flags); | 3169 | spin_lock_irqsave(&pool->lock, flags); |
3139 | pool->suspended = true; | 3170 | pool->suspended = true; |
3140 | spin_unlock_irqrestore(&pool->lock, flags); | 3171 | spin_unlock_irqrestore(&pool->lock, flags); |
3172 | |||
3173 | pool_suspend_active_thins(pool); | ||
3141 | } | 3174 | } |
3142 | 3175 | ||
3143 | static void pool_presuspend_undo(struct dm_target *ti) | 3176 | static void pool_presuspend_undo(struct dm_target *ti) |
@@ -3146,6 +3179,8 @@ static void pool_presuspend_undo(struct dm_target *ti) | |||
3146 | struct pool *pool = pt->pool; | 3179 | struct pool *pool = pt->pool; |
3147 | unsigned long flags; | 3180 | unsigned long flags; |
3148 | 3181 | ||
3182 | pool_resume_active_thins(pool); | ||
3183 | |||
3149 | spin_lock_irqsave(&pool->lock, flags); | 3184 | spin_lock_irqsave(&pool->lock, flags); |
3150 | pool->suspended = false; | 3185 | pool->suspended = false; |
3151 | spin_unlock_irqrestore(&pool->lock, flags); | 3186 | spin_unlock_irqrestore(&pool->lock, flags); |
@@ -3703,6 +3738,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
3703 | r = -ENOMEM; | 3738 | r = -ENOMEM; |
3704 | goto out_unlock; | 3739 | goto out_unlock; |
3705 | } | 3740 | } |
3741 | tc->thin_md = dm_table_get_md(ti->table); | ||
3706 | spin_lock_init(&tc->lock); | 3742 | spin_lock_init(&tc->lock); |
3707 | INIT_LIST_HEAD(&tc->deferred_cells); | 3743 | INIT_LIST_HEAD(&tc->deferred_cells); |
3708 | bio_list_init(&tc->deferred_bio_list); | 3744 | bio_list_init(&tc->deferred_bio_list); |