about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
authorMikulas Patocka <mpatocka@redhat.com>2014-06-14 13:44:31 -0400
committerMike Snitzer <snitzer@redhat.com>2014-07-10 16:44:13 -0400
commitacfe0ad74d2e1bfc81d1d7bf5e15b043985d3650 (patch)
tree06543d0970d34acff9e4c52bab1691e73f0f132a /drivers/md
parent4c834452aad01531db949414f94f817a86348d59 (diff)
dm: allocate a special workqueue for deferred device removal
The commit 2c140a246dc ("dm: allow remove to be deferred") introduced a
deferred removal feature for the device mapper.  When this feature is used
(by passing a flag DM_DEFERRED_REMOVE to DM_DEV_REMOVE_CMD ioctl) and the
user tries to remove a device that is currently in use, the device will be
removed automatically in the future when the last user closes it.

Device mapper used the system workqueue to perform deferred removals.
However, some targets (dm-raid1, dm-mpath, dm-stripe) flush work items
scheduled for the system workqueue from their destructor.  If the
destructor itself is called from the system workqueue during deferred
removal, it introduces a possible deadlock - the workqueue tries to flush
itself.

Fix this possible deadlock by introducing a new workqueue for deferred
removals.  We allocate just one workqueue for all dm targets.  The
ability of dm targets to process IOs isn't dependent on deferred removal
of unused targets, so a deadlock due to shared workqueue isn't possible.

Also, cleanup local_init() to eliminate potential for returning success
on failure.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # 3.13+
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm.c15
1 file changed, 13 insertions, 2 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 437d99045ef2..32b958dbc499 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
 
 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
 
+static struct workqueue_struct *deferred_remove_workqueue;
+
 /*
  * For bio-based dm.
  * One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
 	if (r)
 		goto out_free_rq_tio_cache;
 
+	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+	if (!deferred_remove_workqueue) {
+		r = -ENOMEM;
+		goto out_uevent_exit;
+	}
+
 	_major = major;
 	r = register_blkdev(_major, _name);
 	if (r < 0)
-		goto out_uevent_exit;
+		goto out_free_workqueue;
 
 	if (!_major)
 		_major = r;
 
 	return 0;
 
+out_free_workqueue:
+	destroy_workqueue(deferred_remove_workqueue);
 out_uevent_exit:
 	dm_uevent_exit();
 out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
 static void local_exit(void)
 {
 	flush_scheduled_work();
+	destroy_workqueue(deferred_remove_workqueue);
 
 	kmem_cache_destroy(_rq_tio_cache);
 	kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 
 	if (atomic_dec_and_test(&md->open_count) &&
 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
-		schedule_work(&deferred_remove_work);
+		queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
 	dm_put(md);
 