 drivers/md/dm-mpath.c | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e54ff372d711..9b16788118d2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	spinlock_t lock;
 
 	const char *hw_handler_name;
+	struct work_struct activate_path;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -107,10 +108,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
-static void pg_init_done(struct dm_path *, int);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -180,6 +181,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		INIT_WORK(&m->activate_path, activate_path);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -432,11 +434,8 @@ static void process_queued_ios(struct work_struct *work)
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (init_required) {
-		struct dm_path *path = &pgpath->path;
-		int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-		pg_init_done(path, ret);
-	}
+	if (init_required)
+		queue_work(kmpath_handlerd, &m->activate_path);
 
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -791,6 +790,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 
+	flush_workqueue(kmpath_handlerd);
 	flush_workqueue(kmultipathd);
 	free_multipath(m);
 }
@@ -1108,6 +1108,17 @@ static void pg_init_done(struct dm_path *path, int errors)
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+	int ret;
+	struct multipath *m =
+		container_of(work, struct multipath, activate_path);
+	struct dm_path *path = &m->current_pgpath->path;
+
+	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+	pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
@@ -1451,6 +1462,21 @@ static int __init dm_multipath_init(void)
 		return -ENOMEM;
 	}
 
+	/*
+	 * A separate workqueue is used to handle the device handlers
+	 * to avoid overloading existing workqueue. Overloading the
+	 * old workqueue would also create a bottleneck in the
+	 * path of the storage hardware device activation.
+	 */
+	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+	if (!kmpath_handlerd) {
+		DMERR("failed to create workqueue kmpath_handlerd");
+		destroy_workqueue(kmultipathd);
+		dm_unregister_target(&multipath_target);
+		kmem_cache_destroy(_mpio_cache);
+		return -ENOMEM;
+	}
+
 	DMINFO("version %u.%u.%u loaded",
 	       multipath_target.version[0], multipath_target.version[1],
 	       multipath_target.version[2]);
@@ -1462,6 +1488,7 @@ static void __exit dm_multipath_exit(void)
 {
 	int r;
 
+	destroy_workqueue(kmpath_handlerd);
 	destroy_workqueue(kmultipathd);
 
 	r = dm_unregister_target(&multipath_target);
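
The core of the change is the standard Linux deferred-work pattern: a work_struct embedded in struct multipath is initialized once in alloc_multipath(), queued on the new single-threaded kmpath_handlerd workqueue from process_queued_ios(), and the handler activate_path() recovers its multipath with container_of() before calling scsi_dh_activate() and pg_init_done(). Below is a minimal, self-contained sketch of that pattern as a toy module; my_dev, my_wq, activate_work_fn and the module hooks are illustrative names, not part of dm-mpath.

/*
 * Illustrative sketch only: my_dev, my_wq and activate_work_fn are
 * made-up names, not part of dm-mpath.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	int id;
	struct work_struct activate_work;	/* embedded, like m->activate_path */
};

static struct workqueue_struct *my_wq;	/* plays the role of kmpath_handlerd */
static struct my_dev *example_dev;	/* one example device */

static void activate_work_fn(struct work_struct *work)
{
	/* Recover the containing object from the embedded work item. */
	struct my_dev *dev = container_of(work, struct my_dev, activate_work);

	pr_info("activating device %d from the workqueue\n", dev->id);
}

static int __init my_init(void)
{
	/*
	 * Dedicated single-threaded queue, as the patch creates for
	 * kmpath_handlerd, so slow activations do not back up kmultipathd.
	 */
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	example_dev = kzalloc(sizeof(*example_dev), GFP_KERNEL);
	if (!example_dev) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}
	example_dev->id = 1;
	INIT_WORK(&example_dev->activate_work, activate_work_fn);

	/* Defer the potentially slow step instead of running it inline. */
	queue_work(my_wq, &example_dev->activate_work);
	return 0;
}

static void __exit my_exit(void)
{
	/* destroy_workqueue() drains pending work before freeing the queue. */
	destroy_workqueue(my_wq);
	kfree(example_dev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

As in multipath_dtr() and dm_multipath_exit() above, teardown must ensure queued work has completed before the objects it touches are freed, which is why the patch flushes kmpath_handlerd before free_multipath() and destroys the workqueue on module exit.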