path: root/drivers/md
author	Chandra Seetharaman <sekharan@us.ibm.com>	2008-05-01 17:50:22 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-06-05 10:23:41 -0400
commit	bab7cfc733f4453a502b7491b9ee37b091440ec4 (patch)
tree	147e7f4bf901e6cfe55a3e2e08d5f3b174d88ccd /drivers/md
parent	cfae5c9bb66325cd32d5f2ee41f14749f062a53c (diff)
[SCSI] scsi_dh: Add a single threaded workqueue for initializing paths
Before this patch set (SCSI hardware handlers), initialization of a path was done asynchronously. Doing that requires a workqueue in each device/hardware handler module and leads to unnecessary complication in the device handler code, making it difficult to read the code and follow the state diagram.

Moving that workqueue out of the handlers makes the device handler code simpler; hence, the workqueue is moved up to the dm level.

A new workqueue is added instead of reusing the existing workqueue (kmpathd) for the following reasons:
1. Device activation has to happen quickly; stacking it on the existing workqueue might lead to unnecessary delay in the activation of the path.
2. The effect could be felt the other way too, i.e. the events currently handled by the existing workqueue might see a delayed response.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Acked-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
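[Editor's note] For readers unfamiliar with the idiom, the pattern this patch adopts looks roughly like the sketch below. It is illustrative only; the names demo_dev, demo_activate, kdemo_wq, demo_init, demo_dev_setup, demo_trigger and demo_exit are hypothetical stand-ins, not identifiers from the patch:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical structure embedding its work item, mirroring how
 * struct multipath embeds activate_path in the patch. */
struct demo_dev {
	struct work_struct activate_work;
	/* ... other per-device state ... */
};

static struct workqueue_struct *kdemo_wq;	/* dedicated single-threaded queue */

/* Work function: recover the embedding structure from the work item. */
static void demo_activate(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, activate_work);

	/* ... perform the potentially slow activation for dev here ... */
	(void)dev;
}

static int __init demo_init(void)
{
	/* One dedicated worker thread, so activations never queue
	 * behind unrelated work items. */
	kdemo_wq = create_singlethread_workqueue("kdemo_wq");
	if (!kdemo_wq)
		return -ENOMEM;
	return 0;
}

static void demo_dev_setup(struct demo_dev *dev)
{
	INIT_WORK(&dev->activate_work, demo_activate);
}

static void demo_trigger(struct demo_dev *dev)
{
	queue_work(kdemo_wq, &dev->activate_work);	/* demo_activate() runs later */
}

static void __exit demo_exit(void)
{
	flush_workqueue(kdemo_wq);	/* wait for any queued activation */
	destroy_workqueue(kdemo_wq);
}

Because the queue is single-threaded, queued activations run serially in submission order, which matches the patch's goal of keeping path activation off the shared kmultipathd queue.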
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-mpath.c	41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e54ff372d711..9b16788118d2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	spinlock_t lock;
 
 	const char *hw_handler_name;
+	struct work_struct activate_path;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -107,10 +108,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
-static void pg_init_done(struct dm_path *, int);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -180,6 +181,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		INIT_WORK(&m->activate_path, activate_path);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -432,11 +434,8 @@ static void process_queued_ios(struct work_struct *work)
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (init_required) {
-		struct dm_path *path = &pgpath->path;
-		int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-		pg_init_done(path, ret);
-	}
+	if (init_required)
+		queue_work(kmpath_handlerd, &m->activate_path);
 
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -791,6 +790,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 
+	flush_workqueue(kmpath_handlerd);
 	flush_workqueue(kmultipathd);
 	free_multipath(m);
 }
@@ -1108,6 +1108,17 @@ static void pg_init_done(struct dm_path *path, int errors)
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+	int ret;
+	struct multipath *m =
+		container_of(work, struct multipath, activate_path);
+	struct dm_path *path = &m->current_pgpath->path;
+
+	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+	pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
@@ -1451,6 +1462,21 @@ static int __init dm_multipath_init(void)
 		return -ENOMEM;
 	}
 
+	/*
+	 * A separate workqueue is used to handle the device handlers
+	 * to avoid overloading existing workqueue. Overloading the
+	 * old workqueue would also create a bottleneck in the
+	 * path of the storage hardware device activation.
+	 */
+	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+	if (!kmpath_handlerd) {
+		DMERR("failed to create workqueue kmpath_handlerd");
+		destroy_workqueue(kmultipathd);
+		dm_unregister_target(&multipath_target);
+		kmem_cache_destroy(_mpio_cache);
+		return -ENOMEM;
+	}
+
 	DMINFO("version %u.%u.%u loaded",
 	       multipath_target.version[0], multipath_target.version[1],
 	       multipath_target.version[2]);
@@ -1462,6 +1488,7 @@ static void __exit dm_multipath_exit(void)
 {
 	int r;
 
+	destroy_workqueue(kmpath_handlerd);
 	destroy_workqueue(kmultipathd);
 
 	r = dm_unregister_target(&multipath_target);
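[Editor's note] One detail worth calling out in the new activate_path() above: the work callback receives only a struct work_struct pointer, and container_of() recovers the struct multipath that embeds it, which is why the work item needs no explicit argument. A minimal user-space sketch of the same trick, with hypothetical types (standalone C, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): given a pointer to a
 * member, recover a pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct multipath_like {
	int id;
	int work;	/* stand-in for the embedded work_struct */
};

int main(void)
{
	struct multipath_like m = { .id = 42 };
	int *wp = &m.work;	/* a callback would only receive this pointer */

	struct multipath_like *back =
		container_of(wp, struct multipath_like, work);
	printf("recovered id = %d\n", back->id);	/* prints 42 */
	return 0;
}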