Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r-- | drivers/md/dm-mpath.c | 178
1 files changed, 128 insertions, 50 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 32d0b878eccc..826bce7343b3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -69,6 +69,7 @@ struct multipath {
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
 	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -93,6 +94,8 @@ struct multipath {
 	 * can resubmit bios on error.
 	 */
 	mempool_t *mpio_pool;
+
+	struct mutex work_mutex;
 };
 
 /*
@@ -198,6 +201,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		init_waitqueue_head(&m->pg_init_wait);
+		mutex_init(&m->work_mutex);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -230,6 +235,21 @@ static void free_multipath(struct multipath *m)
  * Path selection
  *-----------------------------------------------*/
 
+static void __pg_init_all_paths(struct multipath *m)
+{
+	struct pgpath *pgpath;
+
+	m->pg_init_count++;
+	m->pg_init_required = 0;
+	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+		/* Skip failed paths */
+		if (!pgpath->is_active)
+			continue;
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+			m->pg_init_in_progress++;
+	}
+}
+
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
 	m->current_pg = pgpath->pg;
@@ -434,7 +454,7 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL, *tmp;
+	struct pgpath *pgpath = NULL;
 	unsigned must_queue = 1;
 	unsigned long flags;
 
@@ -452,14 +472,9 @@ static void process_queued_ios(struct work_struct *work)
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pg_init_count++;
-		m->pg_init_required = 0;
-		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
-			if (queue_work(kmpath_handlerd, &tmp->activate_path))
-				m->pg_init_in_progress++;
-		}
-	}
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+		__pg_init_all_paths(m);
+
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
@@ -592,8 +607,8 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &p->path.dev);
+	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+			  &p->path.dev);
 	if (r) {
 		ti->error = "error getting device";
 		goto bad;
@@ -885,13 +900,43 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	return r;
 }
 
-static void multipath_dtr(struct dm_target *ti)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
 {
-	struct multipath *m = (struct multipath *) ti->private;
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	add_wait_queue(&m->pg_init_wait, &wait);
 
+	while (1) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irqsave(&m->lock, flags);
+		if (!m->pg_init_in_progress) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&m->lock, flags);
+
+		io_schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
+{
 	flush_workqueue(kmpath_handlerd);
+	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_scheduled_work();
+}
+
+static void multipath_dtr(struct dm_target *ti)
+{
+	struct multipath *m = ti->private;
+
+	flush_multipath_work(m);
 	free_multipath(m);
 }
 
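The loop added in multipath_wait_for_pg_init_completion() is the classic open-coded wait: the task queues itself on pg_init_wait, samples pg_init_in_progress under m->lock, and sleeps via io_schedule() so the time is accounted as I/O wait. The same waiter can be written a little more compactly with the prepare_to_wait()/finish_wait() helpers; a sketch using the same struct multipath fields (illustrative only, example_wait_for_pg_init() is a hypothetical name, not part of the patch):

static void example_wait_for_pg_init(struct multipath *m)
{
	DEFINE_WAIT(wait);		/* wait-queue entry for the current task */
	unsigned long flags;

	for (;;) {
		/* queue ourselves and mark the task as sleeping */
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);	/* condition is guarded by m->lock */
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();		/* sleep, accounted as I/O wait */
	}
	finish_wait(&m->pg_init_wait, &wait);	/* back to TASK_RUNNING, dequeued */
}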
@@ -1116,9 +1161,9 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 	return limit_reached;
 }
 
-static void pg_init_done(struct dm_path *path, int errors)
+static void pg_init_done(void *data, int errors)
 {
-	struct pgpath *pgpath = path_to_pgpath(path);
+	struct pgpath *pgpath = data;
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
@@ -1132,8 +1177,8 @@ static void pg_init_done(struct dm_path *path, int errors)
 			errors = 0;
 			break;
 		}
-		DMERR("Cannot failover device because scsi_dh_%s was not "
-		      "loaded.", m->hw_handler_name);
+		DMERR("Could not failover the device: Handler scsi_dh_%s "
+		      "Error %d.", m->hw_handler_name, errors);
 		/*
 		 * Fail path for now, so we do not ping pong
 		 */
@@ -1170,25 +1215,34 @@ static void pg_init_done(struct dm_path *path, int errors)
 			m->current_pgpath = NULL;
 			m->current_pg = NULL;
 		}
-	} else if (!m->pg_init_required) {
-		m->queue_io = 0;
+	} else if (!m->pg_init_required)
 		pg->bypassed = 0;
-	}
 
-	m->pg_init_in_progress--;
-	if (!m->pg_init_in_progress)
-		queue_work(kmultipathd, &m->process_queued_ios);
+	if (--m->pg_init_in_progress)
+		/* Activations of other paths are still on going */
+		goto out;
+
+	if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	queue_work(kmultipathd, &m->process_queued_ios);
+
+	/*
+	 * Wake up any thread waiting to suspend.
+	 */
+	wake_up(&m->pg_init_wait);
+
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
 static void activate_path(struct work_struct *work)
 {
-	int ret;
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, activate_path);
 
-	ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
-	pg_init_done(&pgpath->path, ret);
+	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+				pg_init_done, pgpath);
 }
 
 /*
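pg_init_done() is the waker side of that handshake. Path activation now completes through a callback (scsi_dh_activate() is passed pg_init_done and the pgpath as context rather than returning a status), and only when the last outstanding activation drops pg_init_in_progress to zero are queued I/Os re-dispatched and the suspend path woken via wake_up(&m->pg_init_wait). Stripped of the retry and bypass handling shown in the hunk, the waker pattern is (illustrative sketch only, hypothetical function name):

static void example_pg_init_finished(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (!--m->pg_init_in_progress) {
		/* last activation finished: resume I/O and release waiters */
		m->queue_io = 0;
		queue_work(kmultipathd, &m->process_queued_ios);
		wake_up(&m->pg_init_wait);
	}
	spin_unlock_irqrestore(&m->lock, flags);
}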
@@ -1261,6 +1315,15 @@ static void multipath_presuspend(struct dm_target *ti)
 	queue_if_no_path(m, 0, 1);
 }
 
+static void multipath_postsuspend(struct dm_target *ti)
+{
+	struct multipath *m = ti->private;
+
+	mutex_lock(&m->work_mutex);
+	flush_multipath_work(m);
+	mutex_unlock(&m->work_mutex);
+}
+
 /*
  * Restore the queue_if_no_path setting.
  */
@@ -1397,51 +1460,65 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 
 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-	int r;
+	int r = -EINVAL;
 	struct dm_dev *dev;
 	struct multipath *m = (struct multipath *) ti->private;
 	action_fn action;
 
+	mutex_lock(&m->work_mutex);
+
+	if (dm_suspended(ti)) {
+		r = -EBUSY;
+		goto out;
+	}
+
 	if (argc == 1) {
-		if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
-			return queue_if_no_path(m, 1, 0);
-		else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
-			return queue_if_no_path(m, 0, 0);
+		if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+			r = queue_if_no_path(m, 1, 0);
+			goto out;
+		} else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+			r = queue_if_no_path(m, 0, 0);
+			goto out;
+		}
 	}
 
-	if (argc != 2)
-		goto error;
+	if (argc != 2) {
+		DMWARN("Unrecognised multipath message received.");
+		goto out;
+	}
 
-	if (!strnicmp(argv[0], MESG_STR("disable_group")))
-		return bypass_pg_num(m, argv[1], 1);
-	else if (!strnicmp(argv[0], MESG_STR("enable_group")))
-		return bypass_pg_num(m, argv[1], 0);
-	else if (!strnicmp(argv[0], MESG_STR("switch_group")))
-		return switch_pg_num(m, argv[1]);
-	else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+	if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+		r = bypass_pg_num(m, argv[1], 1);
+		goto out;
+	} else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+		r = bypass_pg_num(m, argv[1], 0);
+		goto out;
+	} else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+		r = switch_pg_num(m, argv[1]);
+		goto out;
+	} else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
 		action = reinstate_path;
 	else if (!strnicmp(argv[0], MESG_STR("fail_path")))
 		action = fail_path;
-	else
-		goto error;
+	else {
+		DMWARN("Unrecognised multipath message received.");
+		goto out;
+	}
 
-	r = dm_get_device(ti, argv[1], ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &dev);
+	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
 	if (r) {
 		DMWARN("message: error getting device %s",
 		       argv[1]);
-		return -EINVAL;
+		goto out;
 	}
 
 	r = action_dev(m, dev, action);
 
 	dm_put_device(ti, dev);
 
+out:
+	mutex_unlock(&m->work_mutex);
 	return r;
-
-error:
-	DMWARN("Unrecognised multipath message received.");
-	return -EINVAL;
 }
 
 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -1567,13 +1644,14 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 1, 0},
+	.version = {1, 1, 1},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
 	.map_rq = multipath_map,
 	.rq_end_io = multipath_end_io,
 	.presuspend = multipath_presuspend,
+	.postsuspend = multipath_postsuspend,
 	.resume = multipath_resume,
 	.status = multipath_status,
 	.message = multipath_message,