author     Chandra Seetharaman <sekharan@us.ibm.com>  2008-05-01 17:50:11 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-06-05 10:23:41 -0400
commit     cfae5c9bb66325cd32d5f2ee41f14749f062a53c (patch)
tree       abceb01b7053f77366b37331e9b4f8408c89df60 /drivers/md
parent     5e7dccad3621f6e2b572f309cf830a2c902cae80 (diff)
[SCSI] scsi_dh: Use SCSI device handler in dm-multipath
This patch converts dm-mpath to use scsi device handlers instead of
dm's hardware handlers. This patch does not add any new functionality.
Old behaviors remain and userspace tools work as is, except that
arguments supplied with the hardware handler are ignored.

One behavioral exception: activation of a path is synchronous in this
patch, as opposed to the older behavior of being asynchronous (changed
in patch 07: scsi_dh: Add a single threaded workqueue for initializing
a path).

Note: There is no need to take a reference on the device handler
module here (as was done in the dm hardware handler case), as the
reference is held from when the device was first found. Instead we
check and make sure that support for the specified device is present
at table load time.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Acked-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
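For orientation, the new activation flow can be condensed into a short
sketch. This is not part of the patch: the helper name
activate_path_sync() is invented here, and the locking, retry
accounting, and queueing around it are elided (the real logic lives in
process_queued_ios() and pg_init_done() in the diff below).

	/* Hypothetical condensation of the flow added below. */
	static void activate_path_sync(struct pgpath *pgpath)
	{
		struct dm_path *path = &pgpath->path;

		/* Hand the path's request queue to its scsi_dh handler. */
		int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));

		/*
		 * Activation is synchronous at this point in the series,
		 * so the SCSI_DH_* result is handled inline: pg_init_done()
		 * maps it to fail_path(), bypass_pg(), or a retry.
		 */
		pg_init_done(path, ret);
	}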
Diffstat (limited to 'drivers/md')
 drivers/md/Kconfig    |   1 +
 drivers/md/dm-mpath.c | 131 +++++++++++++++++++++++++------------------
 2 files changed, 81 insertions(+), 51 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 610af916891e..5303af55d2c7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -252,6 +252,7 @@ config DM_ZERO
 config DM_MULTIPATH
 	tristate "Multipath target"
 	depends on BLK_DEV_DM
+	select SCSI_DH
 	---help---
 	  Allow volume managers to support multipath hardware.
 
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e7ee59e655d5..e54ff372d711 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -20,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <scsi/scsi_dh.h>
 #include <asm/atomic.h>
 
 #define DM_MSG_PREFIX "multipath"
@@ -61,7 +62,7 @@ struct multipath {
 
 	spinlock_t lock;
 
-	struct hw_handler hw_handler;
+	const char *hw_handler_name;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -109,6 +110,7 @@ static struct kmem_cache *_mpio_cache;
 static struct workqueue_struct *kmultipathd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
+static void pg_init_done(struct dm_path *, int);
 
 
 /*-----------------------------------------------
@@ -193,18 +195,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 static void free_multipath(struct multipath *m)
 {
 	struct priority_group *pg, *tmp;
-	struct hw_handler *hwh = &m->hw_handler;
 
 	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 		list_del(&pg->list);
 		free_priority_group(pg, m->ti);
 	}
 
-	if (hwh->type) {
-		hwh->type->destroy(hwh);
-		dm_put_hw_handler(hwh->type);
-	}
-
+	kfree(m->hw_handler_name);
 	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
@@ -216,12 +213,10 @@ static void free_multipath(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
-	struct hw_handler *hwh = &m->hw_handler;
-
 	m->current_pg = pgpath->pg;
 
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
-	if (hwh->type && hwh->type->pg_init) {
+	if (m->hw_handler_name) {
 		m->pg_init_required = 1;
 		m->queue_io = 1;
 	} else {
@@ -409,7 +404,6 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct hw_handler *hwh = &m->hw_handler;
 	struct pgpath *pgpath = NULL;
 	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
@@ -438,8 +432,11 @@ static void process_queued_ios(struct work_struct *work)
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (init_required)
-		hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
+	if (init_required) {
+		struct dm_path *path = &pgpath->path;
+		int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+		pg_init_done(path, ret);
+	}
 
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -652,8 +649,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
-	int r;
-	struct hw_handler_type *hwht;
 	unsigned hw_argc;
 	struct dm_target *ti = m->ti;
 
@@ -661,30 +656,18 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 		{0, 1024, "invalid number of hardware handler args"},
 	};
 
-	r = read_param(_params, shift(as), &hw_argc, &ti->error);
-	if (r)
+	if (read_param(_params, shift(as), &hw_argc, &ti->error))
 		return -EINVAL;
 
 	if (!hw_argc)
 		return 0;
 
-	hwht = dm_get_hw_handler(shift(as));
-	if (!hwht) {
+	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+	request_module("scsi_dh_%s", m->hw_handler_name);
+	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 		ti->error = "unknown hardware handler type";
 		return -EINVAL;
 	}
-
-	m->hw_handler.md = dm_table_get_md(ti->table);
-	dm_put(m->hw_handler.md);
-
-	r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
-	if (r) {
-		dm_put_hw_handler(hwht);
-		ti->error = "hardware handler constructor failed";
-		return r;
-	}
-
-	m->hw_handler.type = hwht;
 	consume(as, hw_argc - 1);
 
 	return 0;
@@ -1063,14 +1046,74 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void pg_init_done(struct dm_path *path, int errors)
+{
+	struct pgpath *pgpath = path_to_pgpath(path);
+	struct priority_group *pg = pgpath->pg;
+	struct multipath *m = pg->m;
+	unsigned long flags;
+
+	/* device or driver problems */
+	switch (errors) {
+	case SCSI_DH_OK:
+		break;
+	case SCSI_DH_NOSYS:
+		if (!m->hw_handler_name) {
+			errors = 0;
+			break;
+		}
+		DMERR("Cannot failover device because scsi_dh_%s was not "
+		      "loaded.", m->hw_handler_name);
+		/*
+		 * Fail path for now, so we do not ping pong
+		 */
+		fail_path(pgpath);
+		break;
+	case SCSI_DH_DEV_TEMP_BUSY:
+		/*
+		 * Probably doing something like FW upgrade on the
+		 * controller so try the other pg.
+		 */
+		bypass_pg(m, pg, 1);
+		break;
+	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
+	case SCSI_DH_RETRY:
+	case SCSI_DH_IMM_RETRY:
+	case SCSI_DH_RES_TEMP_UNAVAIL:
+		if (pg_init_limit_reached(m, pgpath))
+			fail_path(pgpath);
+		errors = 0;
+		break;
+	default:
+		/*
+		 * We probably do not want to fail the path for a device
+		 * error, but this is what the old dm did. In future
+		 * patches we can do more advanced handling.
+		 */
+		fail_path(pgpath);
+	}
+
+	spin_lock_irqsave(&m->lock, flags);
+	if (errors) {
+		DMERR("Could not failover device. Error %d.", errors);
+		m->current_pgpath = NULL;
+		m->current_pg = NULL;
+	} else if (!m->pg_init_required) {
+		m->queue_io = 0;
+		pg->bypassed = 0;
+	}
+
+	m->pg_init_in_progress = 0;
+	queue_work(kmultipathd, &m->process_queued_ios);
+	spin_unlock_irqrestore(&m->lock, flags);
+}
+
 /*
  * end_io handling
  */
 static int do_end_io(struct multipath *m, struct bio *bio,
 		     int error, struct dm_mpath_io *mpio)
 {
-	struct hw_handler *hwh = &m->hw_handler;
-	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
 	unsigned long flags;
 
 	if (!error)
@@ -1097,19 +1140,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	}
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (hwh->type && hwh->type->error)
-		err_flags = hwh->type->error(hwh, bio);
-
-	if (mpio->pgpath) {
-		if (err_flags & MP_FAIL_PATH)
-			fail_path(mpio->pgpath);
-
-		if (err_flags & MP_BYPASS_PG)
-			bypass_pg(m, mpio->pgpath->pg, 1);
-	}
-
-	if (err_flags & MP_ERROR_IO)
-		return -EIO;
 
+	if (mpio->pgpath)
+		fail_path(mpio->pgpath);
 
       requeue:
 	dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1226,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 	int sz = 0;
 	unsigned long flags;
 	struct multipath *m = (struct multipath *) ti->private;
-	struct hw_handler *hwh = &m->hw_handler;
 	struct priority_group *pg;
 	struct pgpath *p;
 	unsigned pg_num;
@@ -1214,12 +1245,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 		DMEMIT("pg_init_retries %u ", m->pg_init_retries);
 	}
 
-	if (hwh->type && hwh->type->status)
-		sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
-	else if (!hwh->type || type == STATUSTYPE_INFO)
+	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
 		DMEMIT("0 ");
 	else
-		DMEMIT("1 %s ", hwh->type->name);
+		DMEMIT("1 %s ", m->hw_handler_name);
 
 	DMEMIT("%u ", m->nr_priority_groups);
 
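For reference, a multipath table that names a hardware handler carries
it as its own argument block, which multipath_status() above now echoes
back as "1 <name>" (or "0 " when no handler is set, or always for
STATUSTYPE_INFO). A hypothetical table line, with invented device
numbers and sizes, for a two-path map using the emc handler:

	0 71014400 multipath 0 1 emc 1 1 round-robin 0 2 1 8:16 1000 8:32 1000

Here "1 emc" is the hardware handler block (one argument, the handler
name); as the commit message notes, any further handler arguments are
now parsed but ignored.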