Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r-- | drivers/md/dm-mpath.c | 163
1 files changed, 90 insertions, 73 deletions
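This patch removes the multipath target's private hardware-handler framework (dm-hw-handler.h) in favour of the generic SCSI device handler (scsi_dh) infrastructure: the target now stores only a handler name, loads the matching scsi_dh_<name> module when the table is parsed, and activates paths through scsi_dh_activate() on a dedicated kmpath_handlerd workqueue, with the SCSI_DH_* result codes mapped to fail/bypass/retry decisions in the new pg_init_done().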
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e7ee59e655d5..9f7302d4878d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,6 @@
 
 #include "dm.h"
 #include "dm-path-selector.h"
-#include "dm-hw-handler.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
 #include "dm-uevent.h"
@@ -20,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <scsi/scsi_dh.h>
 #include <asm/atomic.h>
 
 #define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
 
        spinlock_t lock;
 
-       struct hw_handler hw_handler;
+       const char *hw_handler_name;
+       struct work_struct activate_path;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                m->queue_io = 1;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
+               INIT_WORK(&m->activate_path, activate_path);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 static void free_multipath(struct multipath *m)
 {
        struct priority_group *pg, *tmp;
-       struct hw_handler *hwh = &m->hw_handler;
 
        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }
 
-       if (hwh->type) {
-               hwh->type->destroy(hwh);
-               dm_put_hw_handler(hwh->type);
-       }
-
+       kfree(m->hw_handler_name);
        mempool_destroy(m->mpio_pool);
        kfree(m);
 }
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
-       struct hw_handler *hwh = &m->hw_handler;
-
        m->current_pg = pgpath->pg;
 
        /* Must we initialise the PG first, and queue I/O till it's ready? */
-       if (hwh->type && hwh->type->pg_init) {
+       if (m->hw_handler_name) {
                m->pg_init_required = 1;
                m->queue_io = 1;
        } else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
 {
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
-       struct hw_handler *hwh = &m->hw_handler;
        struct pgpath *pgpath = NULL;
        unsigned init_required = 0, must_queue = 1;
        unsigned long flags;
@@ -439,7 +434,7 @@ out:
        spin_unlock_irqrestore(&m->lock, flags);
 
        if (init_required)
-               hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
+               queue_work(kmpath_handlerd, &m->activate_path);
 
        if (!must_queue)
                dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
-       int r;
-       struct hw_handler_type *hwht;
        unsigned hw_argc;
        struct dm_target *ti = m->ti;
 
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
                {0, 1024, "invalid number of hardware handler args"},
        };
 
-       r = read_param(_params, shift(as), &hw_argc, &ti->error);
-       if (r)
+       if (read_param(_params, shift(as), &hw_argc, &ti->error))
                return -EINVAL;
 
        if (!hw_argc)
                return 0;
 
-       hwht = dm_get_hw_handler(shift(as));
-       if (!hwht) {
+       m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+       request_module("scsi_dh_%s", m->hw_handler_name);
+       if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
                ti->error = "unknown hardware handler type";
+               kfree(m->hw_handler_name);
+               m->hw_handler_name = NULL;
                return -EINVAL;
        }
-
-       m->hw_handler.md = dm_table_get_md(ti->table);
-       dm_put(m->hw_handler.md);
-
-       r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
-       if (r) {
-               dm_put_hw_handler(hwht);
-               ti->error = "hardware handler constructor failed";
-               return r;
-       }
-
-       m->hw_handler.type = hwht;
        consume(as, hw_argc - 1);
 
        return 0;
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
        struct multipath *m = (struct multipath *) ti->private;
 
+       flush_workqueue(kmpath_handlerd);
        flush_workqueue(kmultipathd);
        free_multipath(m);
 }
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
        return limit_reached;
 }
 
-/*
- * pg_init must call this when it has completed its initialisation
- */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
+static void pg_init_done(struct dm_path *path, int errors)
 {
        struct pgpath *pgpath = path_to_pgpath(path);
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;
 
-       /*
-        * If requested, retry pg_init until maximum number of retries exceeded.
-        * If retry not requested and PG already bypassed, always fail the path.
-        */
-       if (err_flags & MP_RETRY) {
-               if (pg_init_limit_reached(m, pgpath))
-                       err_flags |= MP_FAIL_PATH;
-       } else if (err_flags && pg->bypassed)
-               err_flags |= MP_FAIL_PATH;
-
-       if (err_flags & MP_FAIL_PATH)
+       /* device or driver problems */
+       switch (errors) {
+       case SCSI_DH_OK:
+               break;
+       case SCSI_DH_NOSYS:
+               if (!m->hw_handler_name) {
+                       errors = 0;
+                       break;
+               }
+               DMERR("Cannot failover device because scsi_dh_%s was not "
+                     "loaded.", m->hw_handler_name);
+               /*
+                * Fail path for now, so we do not ping pong
+                */
                fail_path(pgpath);
-
-       if (err_flags & MP_BYPASS_PG)
+               break;
+       case SCSI_DH_DEV_TEMP_BUSY:
+               /*
+                * Probably doing something like FW upgrade on the
+                * controller so try the other pg.
+                */
                bypass_pg(m, pg, 1);
+               break;
+       /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
+       case SCSI_DH_RETRY:
+       case SCSI_DH_IMM_RETRY:
+       case SCSI_DH_RES_TEMP_UNAVAIL:
+               if (pg_init_limit_reached(m, pgpath))
+                       fail_path(pgpath);
+               errors = 0;
+               break;
+       default:
+               /*
+                * We probably do not want to fail the path for a device
+                * error, but this is what the old dm did. In future
+                * patches we can do more advanced handling.
+                */
+               fail_path(pgpath);
+       }
 
        spin_lock_irqsave(&m->lock, flags);
-       if (err_flags & ~MP_RETRY) {
+       if (errors) {
+               DMERR("Could not failover device. Error %d.", errors);
                m->current_pgpath = NULL;
                m->current_pg = NULL;
-       } else if (!m->pg_init_required)
+       } else if (!m->pg_init_required) {
                m->queue_io = 0;
+               pg->bypassed = 0;
+       }
 
        m->pg_init_in_progress = 0;
        queue_work(kmultipathd, &m->process_queued_ios);
        spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+       int ret;
+       struct multipath *m =
+               container_of(work, struct multipath, activate_path);
+       struct dm_path *path = &m->current_pgpath->path;
+
+       ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+       pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
 static int do_end_io(struct multipath *m, struct bio *bio,
                     int error, struct dm_mpath_io *mpio)
 {
-       struct hw_handler *hwh = &m->hw_handler;
-       unsigned err_flags = MP_FAIL_PATH;      /* Default behavior */
        unsigned long flags;
 
        if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
        }
        spin_unlock_irqrestore(&m->lock, flags);
 
-       if (hwh->type && hwh->type->error)
-               err_flags = hwh->type->error(hwh, bio);
-
-       if (mpio->pgpath) {
-               if (err_flags & MP_FAIL_PATH)
-                       fail_path(mpio->pgpath);
-
-               if (err_flags & MP_BYPASS_PG)
-                       bypass_pg(m, mpio->pgpath->pg, 1);
-       }
-
-       if (err_flags & MP_ERROR_IO)
-               return -EIO;
+       if (mpio->pgpath)
+               fail_path(mpio->pgpath);
 
 requeue:
        dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
-       struct hw_handler *hwh = &m->hw_handler;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
                DMEMIT("pg_init_retries %u ", m->pg_init_retries);
        }
 
-       if (hwh->type && hwh->type->status)
-               sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
-       else if (!hwh->type || type == STATUSTYPE_INFO)
+       if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
-               DMEMIT("1 %s ", hwh->type->name);
+               DMEMIT("1 %s ", m->hw_handler_name);
 
        DMEMIT("%u ", m->nr_priority_groups);
 
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
                return -ENOMEM;
        }
 
+       /*
+        * A separate workqueue is used to handle the device handlers
+        * to avoid overloading existing workqueue. Overloading the
+        * old workqueue would also create a bottleneck in the
+        * path of the storage hardware device activation.
+        */
+       kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+       if (!kmpath_handlerd) {
+               DMERR("failed to create workqueue kmpath_handlerd");
+               destroy_workqueue(kmultipathd);
+               dm_unregister_target(&multipath_target);
+               kmem_cache_destroy(_mpio_cache);
+               return -ENOMEM;
+       }
+
        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
 {
        int r;
 
+       destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);
 
        r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
        kmem_cache_destroy(_mpio_cache);
 }
 
-EXPORT_SYMBOL_GPL(dm_pg_init_complete);
-
 module_init(dm_multipath_init);
 module_exit(dm_multipath_exit);
 
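For reference, the hardware handler name consumed by parse_hw_handler() arrives via the multipath target's table line. A minimal sketch of such a table (the sector count, device numbers, and the "emc" handler name are hypothetical, chosen only to illustrate the argument layout this file parses):

    # 0 feature args; 1 hw-handler arg (the handler name "emc");
    # 2 priority groups, starting with PG 1; each PG: round-robin
    # selector, 0 selector args, 1 path with 1 path arg (repeat count).
    # With this patch, loading the table triggers
    # request_module("scsi_dh_emc"), and pg_init then goes through
    # scsi_dh_activate() on the kmpath_handlerd workqueue.
    dmsetup create mpath0 --table \
        '0 2097152 multipath 0 1 emc 2 1 round-robin 0 1 1 8:16 1000 round-robin 0 1 1 8:32 1000'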