Diffstat (limited to 'drivers/md/dm-mpath.c')

 drivers/md/dm-mpath.c | 111 ++++++++++++++++++++++++++-------------
 1 file changed, 72 insertions(+), 39 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e81345a1d08f..826bce7343b3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -69,6 +69,7 @@ struct multipath {
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
 	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -95,8 +96,6 @@ struct multipath {
 	mempool_t *mpio_pool;
 
 	struct mutex work_mutex;
-
-	unsigned suspended;	/* Don't create new I/O internally when set. */
 };
 
 /*
@@ -202,6 +201,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
@@ -235,6 +235,21 @@ static void free_multipath(struct multipath *m)
  * Path selection
  *-----------------------------------------------*/
 
+static void __pg_init_all_paths(struct multipath *m)
+{
+	struct pgpath *pgpath;
+
+	m->pg_init_count++;
+	m->pg_init_required = 0;
+	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+		/* Skip failed paths */
+		if (!pgpath->is_active)
+			continue;
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+			m->pg_init_in_progress++;
+	}
+}
+
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
 	m->current_pg = pgpath->pg;
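
The new __pg_init_all_paths() helper centralizes the pg_init kick-off that process_queued_ios() previously open-coded (see the later hunk). One subtlety: queue_work() returns false when the work item is already pending, so pg_init_in_progress counts only activations that were actually scheduled, each of which will produce exactly one pg_init_done() callback. A minimal sketch of that counting idiom, with illustrative names (my_wq, item, pending are not from the patch):

	/* Count one expected completion per work item newly queued;
	 * queue_work() returns true only if the item was not already
	 * pending, so "pending" matches the callbacks that will fire. */
	if (queue_work(my_wq, &item->work))
		pending++;
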
@@ -439,7 +454,7 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL, *tmp;
+	struct pgpath *pgpath = NULL;
 	unsigned must_queue = 1;
 	unsigned long flags;
 
@@ -457,14 +472,9 @@ static void process_queued_ios(struct work_struct *work)
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pg_init_count++;
-		m->pg_init_required = 0;
-		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
-			if (queue_work(kmpath_handlerd, &tmp->activate_path))
-				m->pg_init_in_progress++;
-		}
-	}
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+		__pg_init_all_paths(m);
+
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
@@ -597,8 +607,8 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &p->path.dev);
+	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+			  &p->path.dev);
 	if (r) {
 		ti->error = "error getting device";
 		goto bad;
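
This call site (and the one in multipath_message() at the end of this diff) tracks a dm core interface change: dm_get_device() no longer takes an explicit begin/len range, since the device is now claimed for the whole target. Assuming this diff sits on top of that dm core change, the new prototype is roughly:

	/* Sketch of the narrowed dm core interface assumed by this patch. */
	int dm_get_device(struct dm_target *ti, const char *path,
			  fmode_t mode, struct dm_dev **result);
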
@@ -890,9 +900,34 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	return r;
 }
 
-static void flush_multipath_work(void)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	add_wait_queue(&m->pg_init_wait, &wait);
+
+	while (1) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irqsave(&m->lock, flags);
+		if (!m->pg_init_in_progress) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&m->lock, flags);
+
+		io_schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
 {
 	flush_workqueue(kmpath_handlerd);
+	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_scheduled_work();
 }
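
multipath_wait_for_pg_init_completion() is a hand-rolled wait loop: register on pg_init_wait, re-check pg_init_in_progress under m->lock, and io_schedule() until pg_init_done() drops the counter to zero and issues the wake_up(). The explicit loop exists because the condition must be sampled under the spinlock; if the counter could be read safely without the lock (which the patch does not assume), the whole function would reduce to the standard helper, as a sketch for comparison:

	/* Sketch only: assumes m->pg_init_in_progress may be read
	 * without m->lock, unlike the real code above. */
	wait_event(m->pg_init_wait, !m->pg_init_in_progress);
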
@@ -901,7 +936,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
 
-	flush_multipath_work();
+	flush_multipath_work(m);
 	free_multipath(m);
 }
 
@@ -1128,8 +1163,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 
 static void pg_init_done(void *data, int errors)
 {
-	struct dm_path *path = data;
-	struct pgpath *pgpath = path_to_pgpath(path);
+	struct pgpath *pgpath = data;
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
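
Passing the pgpath itself as the completion cookie (see the activate_path() hunk below) removes a container_of() round trip through the embedded dm_path. For reference, the path_to_pgpath() accessor that pg_init_done() no longer needs is simply:

	/* Existing dm-mpath.c helper made unnecessary here by this change. */
	static struct pgpath *path_to_pgpath(struct dm_path *path)
	{
		return container_of(path, struct pgpath, path);
	}
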
@@ -1143,8 +1177,8 @@ static void pg_init_done(void *data, int errors)
 			errors = 0;
 			break;
 		}
-		DMERR("Cannot failover device because scsi_dh_%s was not "
-		      "loaded.", m->hw_handler_name);
+		DMERR("Could not failover the device: Handler scsi_dh_%s "
+		      "Error %d.", m->hw_handler_name, errors);
 		/*
 		 * Fail path for now, so we do not ping pong
 		 */
@@ -1181,14 +1215,24 @@ static void pg_init_done(void *data, int errors)
 			m->current_pgpath = NULL;
 			m->current_pg = NULL;
 		}
-	} else if (!m->pg_init_required) {
-		m->queue_io = 0;
+	} else if (!m->pg_init_required)
 		pg->bypassed = 0;
-	}
 
-	m->pg_init_in_progress--;
-	if (!m->pg_init_in_progress)
-		queue_work(kmultipathd, &m->process_queued_ios);
+	if (--m->pg_init_in_progress)
+		/* Activations of other paths are still on going */
+		goto out;
+
+	if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	queue_work(kmultipathd, &m->process_queued_ios);
+
+	/*
+	 * Wake up any thread waiting to suspend.
+	 */
+	wake_up(&m->pg_init_wait);
+
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
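
The reworked accounting distinguishes intermediate completions from the last one: every callback decrements pg_init_in_progress, but only the final one clears queue_io, requeues pending I/O, and wakes any suspend waiter parked in multipath_wait_for_pg_init_completion(). Distilled to its essentials (not verbatim from the patch), the waker side of that pairing is:

	/* Update the condition under the same lock the waiter checks,
	 * then wake; wake_up() with no sleepers is a harmless no-op. */
	spin_lock_irqsave(&m->lock, flags);
	if (!--m->pg_init_in_progress)
		wake_up(&m->pg_init_wait);
	spin_unlock_irqrestore(&m->lock, flags);
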
@@ -1198,7 +1242,7 @@ static void activate_path(struct work_struct *work)
 		container_of(work, struct pgpath, activate_path);
 
 	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-				pg_init_done, &pgpath->path);
+				pg_init_done, pgpath);
 }
 
 /*
@@ -1276,8 +1320,7 @@ static void multipath_postsuspend(struct dm_target *ti)
 	struct multipath *m = ti->private;
 
 	mutex_lock(&m->work_mutex);
-	m->suspended = 1;
-	flush_multipath_work();
+	flush_multipath_work(m);
 	mutex_unlock(&m->work_mutex);
 }
 
@@ -1289,10 +1332,6 @@ static void multipath_resume(struct dm_target *ti)
 	struct multipath *m = (struct multipath *) ti->private;
 	unsigned long flags;
 
-	mutex_lock(&m->work_mutex);
-	m->suspended = 0;
-	mutex_unlock(&m->work_mutex);
-
 	spin_lock_irqsave(&m->lock, flags);
 	m->queue_if_no_path = m->saved_queue_if_no_path;
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -1428,11 +1467,6 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 
 	mutex_lock(&m->work_mutex);
 
-	if (m->suspended) {
-		r = -EBUSY;
-		goto out;
-	}
-
 	if (dm_suspended(ti)) {
 		r = -EBUSY;
 		goto out;
@@ -1471,8 +1505,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 		goto out;
 	}
 
-	r = dm_get_device(ti, argv[1], ti->begin, ti->len,
-			  dm_table_get_mode(ti->table), &dev);
+	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
 	if (r) {
 		DMWARN("message: error getting device %s",
 		       argv[1]);