 drivers/md/dm-mpath.c | 67 ++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 25 deletions(-)
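In short, this patch lets dm-multipath delay pg_init retries: activate_path becomes a delayed_work item, a new "pg_init_delay_msecs" feature argument (0-60000, with a built-in default of 2000 ms) sets the delay used when a hardware handler returns SCSI_DH_RETRY, the now-unused deactivate_path/blk_abort_queue work is removed, the workqueues are created with alloc_workqueue()/alloc_ordered_workqueue() and WQ_MEM_RECLAIM, and the target version is bumped to 1.2.0. Illustrative usage notes (assumptions, not part of the patch) follow the relevant hunks below.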
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda90ad4..b82d28819e2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,8 @@
 
 #define DM_MSG_PREFIX "multipath"
 #define MESG_STR(x) x, sizeof(x)
+#define DM_PG_INIT_DELAY_MSECS 2000
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
 
 /* Path properties */
 struct pgpath {
@@ -33,8 +35,7 @@ struct pgpath {
         unsigned fail_count;            /* Cumulative failure count */
 
         struct dm_path path;
-        struct work_struct deactivate_path;
-        struct work_struct activate_path;
+        struct delayed_work activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -65,11 +66,15 @@ struct multipath {
 
         const char *hw_handler_name;
         char *hw_handler_params;
+
         unsigned nr_priority_groups;
         struct list_head priority_groups;
+
+        wait_queue_head_t pg_init_wait;         /* Wait for pg_init completion */
+
         unsigned pg_init_required;              /* pg_init needs calling? */
         unsigned pg_init_in_progress;           /* Only one pg_init allowed at once */
-        wait_queue_head_t pg_init_wait;         /* Wait for pg_init completion */
+        unsigned pg_init_delay_retry;           /* Delay pg_init retry? */
 
         unsigned nr_valid_paths;                /* Total number of usable paths */
         struct pgpath *current_pgpath;
@@ -82,6 +87,7 @@ struct multipath {
         unsigned saved_queue_if_no_path;/* Saved state during suspension */
         unsigned pg_init_retries;       /* Number of times to retry pg_init */
         unsigned pg_init_count;         /* Number of times pg_init called */
+        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
 
         struct work_struct process_queued_ios;
         struct list_head queued_ios;
@@ -116,7 +122,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -129,8 +134,7 @@ static struct pgpath *alloc_pgpath(void)
 
         if (pgpath) {
                 pgpath->is_active = 1;
-                INIT_WORK(&pgpath->deactivate_path, deactivate_path);
-                INIT_WORK(&pgpath->activate_path, activate_path);
+                INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
         }
 
         return pgpath;
@@ -141,14 +145,6 @@ static void free_pgpath(struct pgpath *pgpath)
         kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-        struct pgpath *pgpath =
-                container_of(work, struct pgpath, deactivate_path);
-
-        blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
         struct priority_group *pg;
@@ -199,6 +195,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                 INIT_LIST_HEAD(&m->queued_ios);
                 spin_lock_init(&m->lock);
                 m->queue_io = 1;
+                m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
                 INIT_WORK(&m->process_queued_ios, process_queued_ios);
                 INIT_WORK(&m->trigger_event, trigger_event);
                 init_waitqueue_head(&m->pg_init_wait);
@@ -238,14 +235,19 @@ static void free_multipath(struct multipath *m)
 static void __pg_init_all_paths(struct multipath *m)
 {
         struct pgpath *pgpath;
+        unsigned long pg_init_delay = 0;
 
         m->pg_init_count++;
         m->pg_init_required = 0;
+        if (m->pg_init_delay_retry)
+                pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
+                                                 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
         list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
                 /* Skip failed paths */
                 if (!pgpath->is_active)
                         continue;
-                if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+                if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
+                                       pg_init_delay))
                         m->pg_init_in_progress++;
         }
 }
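Note that queue_delayed_work() takes its delay in jiffies, hence the msecs_to_jiffies() conversion above. A minimal worked example (not from the patch), assuming HZ=250:

    /* msecs_to_jiffies(2000) == 2000 * 250 / 1000 == 500 jiffies, i.e. a 2 second delay */

When pg_init_delay_retry is clear, pg_init_delay stays 0 and the work runs immediately, preserving the old queue_work() behaviour.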
@@ -793,8 +795,9 @@ static int parse_features(struct arg_set *as, struct multipath *m)
         const char *param_name;
 
         static struct param _params[] = {
-                {0, 3, "invalid number of feature args"},
+                {0, 5, "invalid number of feature args"},
                 {1, 50, "pg_init_retries must be between 1 and 50"},
+                {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
         };
 
         r = read_param(_params, shift(as), &argc, &ti->error);
@@ -821,6 +824,14 @@ static int parse_features(struct arg_set *as, struct multipath *m)
                         continue;
                 }
 
+                if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+                    (argc >= 1)) {
+                        r = read_param(_params + 2, shift(as),
+                                       &m->pg_init_delay_msecs, &ti->error);
+                        argc--;
+                        continue;
+                }
+
                 ti->error = "Unrecognised multipath feature request";
                 r = -EINVAL;
         } while (argc && !r);
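With the parser change above, the delay can be requested as a feature at table load time. A minimal illustrative table line, assuming an ALUA hardware handler and hypothetical paths 8:16 and 8:32 (none of which come from the patch itself):

    0 2097152 multipath 2 pg_init_delay_msecs 5000 1 alua 1 1 round-robin 0 2 1 8:16 100 8:32 100

Here "2 pg_init_delay_msecs 5000" is the feature block consumed by parse_features(); read_param() rejects values outside the 0-60000 range declared in _params.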
@@ -931,7 +942,7 @@ static void flush_multipath_work(struct multipath *m)
         flush_workqueue(kmpath_handlerd);
         multipath_wait_for_pg_init_completion(m);
         flush_workqueue(kmultipathd);
-        flush_scheduled_work();
+        flush_work_sync(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -995,7 +1006,6 @@ static int fail_path(struct pgpath *pgpath)
                       pgpath->path.dev->name, m->nr_valid_paths);
 
         schedule_work(&m->trigger_event);
-        queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
         spin_unlock_irqrestore(&m->lock, flags);
@@ -1034,7 +1044,7 @@ static int reinstate_path(struct pgpath *pgpath)
                 m->current_pgpath = NULL;
                 queue_work(kmultipathd, &m->process_queued_ios);
         } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
-                if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                         m->pg_init_in_progress++;
         }
 
@@ -1169,6 +1179,7 @@ static void pg_init_done(void *data, int errors)
         struct priority_group *pg = pgpath->pg;
         struct multipath *m = pg->m;
         unsigned long flags;
+        unsigned delay_retry = 0;
 
         /* device or driver problems */
         switch (errors) {
@@ -1193,8 +1204,9 @@ static void pg_init_done(void *data, int errors)
                  */
                 bypass_pg(m, pg, 1);
                 break;
-        /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
         case SCSI_DH_RETRY:
+                /* Wait before retrying. */
+                delay_retry = 1;
         case SCSI_DH_IMM_RETRY:
         case SCSI_DH_RES_TEMP_UNAVAIL:
                 if (pg_init_limit_reached(m, pgpath))
@@ -1227,6 +1239,7 @@ static void pg_init_done(void *data, int errors)
         if (!m->pg_init_required)
                 m->queue_io = 0;
 
+        m->pg_init_delay_retry = delay_retry;
         queue_work(kmultipathd, &m->process_queued_ios);
 
         /*
@@ -1241,7 +1254,7 @@ out:
 static void activate_path(struct work_struct *work)
 {
         struct pgpath *pgpath =
-                container_of(work, struct pgpath, activate_path);
+                container_of(work, struct pgpath, activate_path.work);
 
         scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
                          pg_init_done, pgpath);
@@ -1382,11 +1395,14 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
         else {
                 DMEMIT("%u ", m->queue_if_no_path +
-                              (m->pg_init_retries > 0) * 2);
+                              (m->pg_init_retries > 0) * 2 +
+                              (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
                 if (m->queue_if_no_path)
                         DMEMIT("queue_if_no_path ");
                 if (m->pg_init_retries)
                         DMEMIT("pg_init_retries %u ", m->pg_init_retries);
+                if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
+                        DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
         }
 
         if (!m->hw_handler_name || type == STATUSTYPE_INFO)
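The status change keeps older output identical unless a non-default delay is set. As illustrative arithmetic (not captured from a real device): with queue_if_no_path enabled, pg_init_retries unset and pg_init_delay_msecs set to 5000, the feature count is 1 + 0*2 + 1*2 = 3, so the table status begins "3 queue_if_no_path pg_init_delay_msecs 5000 ...". Leaving pg_init_delay_msecs at DM_PG_INIT_DELAY_DEFAULT suppresses the extra two arguments.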
@@ -1655,7 +1671,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
         .name = "multipath",
-        .version = {1, 1, 1},
+        .version = {1, 2, 0},
         .module = THIS_MODULE,
         .ctr = multipath_ctr,
         .dtr = multipath_dtr,
@@ -1687,7 +1703,7 @@ static int __init dm_multipath_init(void)
                 return -EINVAL;
         }
 
-        kmultipathd = create_workqueue("kmpathd");
+        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
         if (!kmultipathd) {
                 DMERR("failed to create workqueue kmpathd");
                 dm_unregister_target(&multipath_target);
@@ -1701,7 +1717,8 @@ static int __init dm_multipath_init(void)
          * old workqueue would also create a bottleneck in the
          * path of the storage hardware device activation.
          */
-        kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+        kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
+                                                  WQ_MEM_RECLAIM);
         if (!kmpath_handlerd) {
                 DMERR("failed to create workqueue kmpath_handlerd");
                 destroy_workqueue(kmultipathd);