author		Chandra Seetharaman <sekharan@us.ibm.com>	2008-05-01 17:50:34 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-06-05 10:23:41 -0400
commit		cb520223d7f22c5386aff27a5856a66e2c32aaac (patch)
tree		850268071c54a99e4099de6875bad15d436781ab /drivers
parent		2651f5d7d3bc5120a439e498f131e4d731f99b3e (diff)
[SCSI] scsi_dh: Remove hardware handlers from dm
This patch removes the three hardware handlers that currently exist
under dm, as their functionality has been moved to the SCSI layer by
the earlier patches in this series.
[jejb: removed more makefile hunks and rejection fixes]
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Acked-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
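For context, a dm-multipath table selects one of these handlers by name. In the sketch below (device numbers, sizes, and path counts are hypothetical), the "1 emc" block means one hardware-handler argument, the handler name itself, so emc_create() in the removed dm-emc.c applies its defaults; "3 emc 0 1" would instead pass short_trespass=0 and hr=1:

	# two priority groups of one path each, hardware handler "emc"
	echo "0 2097152 multipath 0 1 emc 2 1 round-robin 0 1 1 8:16 1000 round-robin 0 1 1 8:32 1000" \
		| dmsetup create mpath0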
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/Kconfig		 18
-rw-r--r--	drivers/md/Makefile		  5
-rw-r--r--	drivers/md/dm-emc.c		345
-rw-r--r--	drivers/md/dm-mpath-hp-sw.c	247
-rw-r--r--	drivers/md/dm-mpath-rdac.c	700
5 files changed, 0 insertions(+), 1315 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5303af55d2c7..b4a3c7d1451d 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -256,24 +256,6 @@ config DM_MULTIPATH
 	---help---
 	  Allow volume managers to support multipath hardware.
 
-config DM_MULTIPATH_EMC
-	tristate "EMC CX/AX multipath support"
-	depends on DM_MULTIPATH && BLK_DEV_DM
-	---help---
-	  Multipath support for EMC CX/AX series hardware.
-
-config DM_MULTIPATH_RDAC
-	tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
-	---help---
-	  Multipath support for LSI/Engenio RDAC.
-
-config DM_MULTIPATH_HP
-	tristate "HP MSA multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
-	---help---
-	  Multipath support for HP MSA (Active/Passive) series hardware.
-
 config DM_DELAY
 	tristate "I/O delaying target (EXPERIMENTAL)"
 	depends on BLK_DEV_DM && EXPERIMENTAL
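The removed options have counterparts in the SCSI device-handler framework added earlier in this series; for orientation only, a hypothetical .config fragment (option names from drivers/scsi/device_handler):

	# removed by this patch        scsi_dh replacement
	CONFIG_DM_MULTIPATH_EMC=m  --> CONFIG_SCSI_DH_EMC=m
	CONFIG_DM_MULTIPATH_RDAC=m --> CONFIG_SCSI_DH_RDAC=m
	CONFIG_DM_MULTIPATH_HP=m   --> CONFIG_SCSI_DH_HP_SW=m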
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7be09eeea293..62e141e86d6f 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -7,8 +7,6 @@ dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
 dm-mirror-objs	:= dm-raid1.o
-dm-rdac-objs	:= dm-mpath-rdac.o
-dm-hp-sw-objs	:= dm-mpath-hp-sw.o
 md-mod-objs	:= md.o bitmap.o
 raid456-objs	:= raid5.o raid6algos.o raid6recov.o raid6tables.o \
 		   raid6int1.o raid6int2.o raid6int4.o \
@@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
-obj-$(CONFIG_DM_MULTIPATH_EMC)	+= dm-emc.o
-obj-$(CONFIG_DM_MULTIPATH_HP)	+= dm-hp-sw.o
-obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
 obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
deleted file mode 100644
index 3ea5ad4b7805..000000000000
--- a/drivers/md/dm-emc.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
- *
- * This file is released under the GPL.
- *
- * Multipath support for EMC CLARiiON AX/CX-series hardware.
- */
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-
-#define DM_MSG_PREFIX "multipath emc"
-
-struct emc_handler {
-	spinlock_t lock;
-
-	/* Whether we should send the short trespass command (FC-series)
-	 * or the long version (default for AX/CX CLARiiON arrays). */
-	unsigned short_trespass;
-	/* Whether or not to honor SCSI reservations when initiating a
-	 * switch-over. Default: Don't. */
-	unsigned hr;
-
-	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-#define TRESPASS_PAGE 0x22
-#define EMC_FAILOVER_TIMEOUT (60 * HZ)
-
-/* Code borrowed from dm-lsi-rdac by Mike Christie */
-
-static inline void free_bio(struct bio *bio)
-{
-	__free_page(bio->bi_io_vec[0].bv_page);
-	bio_put(bio);
-}
-
-static void emc_endio(struct bio *bio, int error)
-{
-	struct dm_path *path = bio->bi_private;
-
-	/* We also need to look at the sense keys here whether or not to
-	 * switch to the next PG etc.
-	 *
-	 * For now simple logic: either it works or it doesn't.
-	 */
-	if (error)
-		dm_pg_init_complete(path, MP_FAIL_PATH);
-	else
-		dm_pg_init_complete(path, 0);
-
-	/* request is freed in block layer */
-	free_bio(bio);
-}
-
-static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
-{
-	struct bio *bio;
-	struct page *page;
-
-	bio = bio_alloc(GFP_ATOMIC, 1);
-	if (!bio) {
-		DMERR("get_failover_bio: bio_alloc() failed.");
-		return NULL;
-	}
-
-	bio->bi_rw |= (1 << BIO_RW);
-	bio->bi_bdev = path->dev->bdev;
-	bio->bi_sector = 0;
-	bio->bi_private = path;
-	bio->bi_end_io = emc_endio;
-
-	page = alloc_page(GFP_ATOMIC);
-	if (!page) {
-		DMERR("get_failover_bio: alloc_page() failed.");
-		bio_put(bio);
-		return NULL;
-	}
-
-	if (bio_add_page(bio, page, data_size, 0) != data_size) {
-		DMERR("get_failover_bio: bio_add_page() failed.");
-		__free_page(page);
-		bio_put(bio);
-		return NULL;
-	}
-
-	return bio;
-}
-
-static struct request *get_failover_req(struct emc_handler *h,
-					struct bio *bio, struct dm_path *path)
-{
-	struct request *rq;
-	struct block_device *bdev = bio->bi_bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	/* FIXME: Figure out why it fails with GFP_ATOMIC. */
-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	if (!rq) {
-		DMERR("get_failover_req: blk_get_request failed");
-		return NULL;
-	}
-
-	blk_rq_append_bio(q, rq, bio);
-
-	rq->sense = h->sense;
-	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
-	rq->sense_len = 0;
-
-	rq->timeout = EMC_FAILOVER_TIMEOUT;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-
-	return rq;
-}
-
-static struct request *emc_trespass_get(struct emc_handler *h,
-					struct dm_path *path)
-{
-	struct bio *bio;
-	struct request *rq;
-	unsigned char *page22;
-	unsigned char long_trespass_pg[] = {
-		0, 0, 0, 0,
-		TRESPASS_PAGE,        /* Page code */
-		0x09,                 /* Page length - 2 */
-		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-		0xff, 0xff,           /* Trespass target */
-		0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
-	};
-	unsigned char short_trespass_pg[] = {
-		0, 0, 0, 0,
-		TRESPASS_PAGE,        /* Page code */
-		0x02,                 /* Page length - 2 */
-		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
-		0xff,                 /* Trespass target */
-	};
-	unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
-				sizeof(long_trespass_pg);
-
-	/* get bio backing */
-	if (data_size > PAGE_SIZE)
-		/* this should never happen */
-		return NULL;
-
-	bio = get_failover_bio(path, data_size);
-	if (!bio) {
-		DMERR("emc_trespass_get: no bio");
-		return NULL;
-	}
-
-	page22 = (unsigned char *)bio_data(bio);
-	memset(page22, 0, data_size);
-
-	memcpy(page22, h->short_trespass ?
-		short_trespass_pg : long_trespass_pg, data_size);
-
-	/* get request for block layer packet command */
-	rq = get_failover_req(h, bio, path);
-	if (!rq) {
-		DMERR("emc_trespass_get: no rq");
-		free_bio(bio);
-		return NULL;
-	}
-
-	/* Prepare the command. */
-	rq->cmd[0] = MODE_SELECT;
-	rq->cmd[1] = 0x10;
-	rq->cmd[4] = data_size;
-	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-	return rq;
-}
-
-static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
-			struct dm_path *path)
-{
-	struct request *rq;
-	struct request_queue *q = bdev_get_queue(path->dev->bdev);
-
-	/*
-	 * We can either blindly init the pg (then look at the sense),
-	 * or we can send some commands to get the state here (then
-	 * possibly send the fo cmnd), or we can also have the
-	 * initial state passed into us and then get an update here.
-	 */
-	if (!q) {
-		DMINFO("emc_pg_init: no queue");
-		goto fail_path;
-	}
-
-	/* FIXME: The request should be pre-allocated. */
-	rq = emc_trespass_get(hwh->context, path);
-	if (!rq) {
-		DMERR("emc_pg_init: no rq");
-		goto fail_path;
-	}
-
-	DMINFO("emc_pg_init: sending switch-over command");
-	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
-	return;
-
-fail_path:
-	dm_pg_init_complete(path, MP_FAIL_PATH);
-}
-
-static struct emc_handler *alloc_emc_handler(void)
-{
-	struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
-
-	if (h)
-		spin_lock_init(&h->lock);
-
-	return h;
-}
-
-static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-	struct emc_handler *h;
-	unsigned hr, short_trespass;
-
-	if (argc == 0) {
-		/* No arguments: use defaults */
-		hr = 0;
-		short_trespass = 0;
-	} else if (argc != 2) {
-		DMWARN("incorrect number of arguments");
-		return -EINVAL;
-	} else {
-		if ((sscanf(argv[0], "%u", &short_trespass) != 1)
-			|| (short_trespass > 1)) {
-			DMWARN("invalid trespass mode selected");
-			return -EINVAL;
-		}
-
-		if ((sscanf(argv[1], "%u", &hr) != 1)
-			|| (hr > 1)) {
-			DMWARN("invalid honor reservation flag selected");
-			return -EINVAL;
-		}
-	}
-
-	h = alloc_emc_handler();
-	if (!h)
-		return -ENOMEM;
-
-	hwh->context = h;
-
-	if ((h->short_trespass = short_trespass))
-		DMWARN("short trespass command will be send");
-	else
-		DMWARN("long trespass command will be send");
-
-	if ((h->hr = hr))
-		DMWARN("honor reservation bit will be set");
-	else
-		DMWARN("honor reservation bit will not be set (default)");
-
-	return 0;
-}
-
-static void emc_destroy(struct hw_handler *hwh)
-{
-	struct emc_handler *h = (struct emc_handler *) hwh->context;
-
-	kfree(h);
-	hwh->context = NULL;
-}
-
-static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
-{
-	/* FIXME: Patch from axboe still missing */
-#if 0
-	int sense;
-
-	if (bio->bi_error & BIO_SENSE) {
-		sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */
-
-		if (sense == 0x020403) {
-			/* LUN Not Ready - Manual Intervention Required
-			 * indicates this is a passive path.
-			 *
-			 * FIXME: However, if this is seen and EVPD C0
-			 * indicates that this is due to a NDU in
-			 * progress, we should set FAIL_PATH too.
-			 * This indicates we might have to do a SCSI
-			 * inquiry in the end_io path. Ugh. */
-			return MP_BYPASS_PG | MP_RETRY_IO;
-		} else if (sense == 0x052501) {
-			/* An array based copy is in progress. Do not
-			 * fail the path, do not bypass to another PG,
-			 * do not retry. Fail the IO immediately.
-			 * (Actually this is the same conclusion as in
-			 * the default handler, but lets make sure.) */
-			return 0;
-		} else if (sense == 0x062900) {
-			/* Unit Attention Code. This is the first IO
-			 * to the new path, so just retry. */
-			return MP_RETRY_IO;
-		}
-	}
-#endif
-
-	/* Try default handler */
-	return dm_scsi_err_handler(hwh, bio);
-}
-
-static struct hw_handler_type emc_hwh = {
-	.name = "emc",
-	.module = THIS_MODULE,
-	.create = emc_create,
-	.destroy = emc_destroy,
-	.pg_init = emc_pg_init,
-	.error = emc_error,
-};
-
-static int __init dm_emc_init(void)
-{
-	int r = dm_register_hw_handler(&emc_hwh);
-
-	if (r < 0)
-		DMERR("register failed %d", r);
-
-	DMINFO("version 0.0.3 loaded");
-
-	return r;
-}
-
-static void __exit dm_emc_exit(void)
-{
-	int r = dm_unregister_hw_handler(&emc_hwh);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
-}
-
-module_init(dm_emc_init);
-module_exit(dm_emc_exit);
-
-MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
-MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
deleted file mode 100644
index b63a0ab37c53..000000000000
--- a/drivers/md/dm-mpath-hp-sw.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (C) 2005 Mike Christie, All rights reserved.
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
- * Authors: Mike Christie
- *          Dave Wysochanski
- *
- * This file is released under the GPL.
- *
- * This module implements the specific path activation code for
- * HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive)
- * storage arrays.
- * These storage arrays have controller-based failover, not
- * LUN-based failover.  However, LUN-based failover is the design
- * of dm-multipath. Thus, this module is written for LUN-based failover.
- */
-#include <linux/blkdev.h>
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-
-#define DM_MSG_PREFIX "multipath hp-sw"
-#define DM_HP_HWH_NAME "hp-sw"
-#define DM_HP_HWH_VER "1.0.0"
-
-struct hp_sw_context {
-	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-/*
- * hp_sw_error_is_retryable - Is an HP-specific check condition retryable?
- * @req: path activation request
- *
- * Examine error codes of request and determine whether the error is retryable.
- * Some error codes are already retried by scsi-ml (see
- * scsi_decide_disposition), but some HP specific codes are not.
- * The intent of this routine is to supply the logic for the HP specific
- * check conditions.
- *
- * Returns:
- *  1 - command completed with retryable error
- *  0 - command completed with non-retryable error
- *
- * Possible optimizations
- * 1. More hardware-specific error codes
- */
-static int hp_sw_error_is_retryable(struct request *req)
-{
-	/*
-	 * NOT_READY is known to be retryable
-	 * For now we just dump out the sense data and call it retryable
-	 */
-	if (status_byte(req->errors) == CHECK_CONDITION)
-		__scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len);
-
-	/*
-	 * At this point we don't have complete information about all the error
-	 * codes from this hardware, so we are just conservative and retry
-	 * when in doubt.
-	 */
-	return 1;
-}
-
-/*
- * hp_sw_end_io - Completion handler for HP path activation.
- * @req: path activation request
- * @error: scsi-ml error
- *
- *  Check sense data, free request structure, and notify dm that
- *  pg initialization has completed.
- *
- * Context: scsi-ml softirq
- *
- */
-static void hp_sw_end_io(struct request *req, int error)
-{
-	struct dm_path *path = req->end_io_data;
-	unsigned err_flags = 0;
-
-	if (!error) {
-		DMDEBUG("%s path activation command - success",
-			path->dev->name);
-		goto out;
-	}
-
-	if (hp_sw_error_is_retryable(req)) {
-		DMDEBUG("%s path activation command - retry",
-			path->dev->name);
-		err_flags = MP_RETRY;
-		goto out;
-	}
-
-	DMWARN("%s path activation fail - error=0x%x",
-	       path->dev->name, error);
-	err_flags = MP_FAIL_PATH;
-
-out:
-	req->end_io_data = NULL;
-	__blk_put_request(req->q, req);
-	dm_pg_init_complete(path, err_flags);
-}
-
-/*
- * hp_sw_get_request - Allocate an HP specific path activation request
- * @path: path on which request will be sent (needed for request queue)
- *
- * The START command is used for path activation request.
- * These arrays are controller-based failover, not LUN based.
- * One START command issued to a single path will fail over all
- * LUNs for the same controller.
- *
- * Possible optimizations
- * 1. Make timeout configurable
- * 2. Preallocate request
- */
-static struct request *hp_sw_get_request(struct dm_path *path)
-{
-	struct request *req;
-	struct block_device *bdev = path->dev->bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-	struct hp_sw_context *h = path->hwhcontext;
-
-	req = blk_get_request(q, WRITE, GFP_NOIO);
-	if (!req)
-		goto out;
-
-	req->timeout = 60 * HZ;
-
-	req->errors = 0;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-	req->end_io_data = path;
-	req->sense = h->sense;
-	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
-
-	req->cmd[0] = START_STOP;
-	req->cmd[4] = 1;
-	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
-
-out:
-	return req;
-}
-
-/*
- * hp_sw_pg_init - HP path activation implementation.
- * @hwh: hardware handler specific data
- * @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
- * @path: path to send initialization command
- *
- * Send an HP-specific path activation command on 'path'.
- * Do not try to optimize in any way, just send the activation command.
- * More than one path activation command may be sent to the same controller.
- * This seems to work fine for basic failover support.
- *
- * Possible optimizations
- * 1. Detect an in-progress activation request and avoid submitting another one
- * 2. Model the controller and only send a single activation request at a time
- * 3. Determine the state of a path before sending an activation request
- *
- * Context: kmpathd (see process_queued_ios() in dm-mpath.c)
- */
-static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
-			  struct dm_path *path)
-{
-	struct request *req;
-	struct hp_sw_context *h;
-
-	path->hwhcontext = hwh->context;
-	h = hwh->context;
-
-	req = hp_sw_get_request(path);
-	if (!req) {
-		DMERR("%s path activation command - allocation fail",
-		      path->dev->name);
-		goto retry;
-	}
-
-	DMDEBUG("%s path activation command - sent", path->dev->name);
-
-	blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
-	return;
-
-retry:
-	dm_pg_init_complete(path, MP_RETRY);
-}
-
-static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-	struct hp_sw_context *h;
-
-	h = kmalloc(sizeof(*h), GFP_KERNEL);
-	if (!h)
-		return -ENOMEM;
-
-	hwh->context = h;
-
-	return 0;
-}
-
-static void hp_sw_destroy(struct hw_handler *hwh)
-{
-	struct hp_sw_context *h = hwh->context;
-
-	kfree(h);
-}
-
-static struct hw_handler_type hp_sw_hwh = {
-	.name = DM_HP_HWH_NAME,
-	.module = THIS_MODULE,
-	.create = hp_sw_create,
-	.destroy = hp_sw_destroy,
-	.pg_init = hp_sw_pg_init,
-};
-
-static int __init hp_sw_init(void)
-{
-	int r;
-
-	r = dm_register_hw_handler(&hp_sw_hwh);
-	if (r < 0)
-		DMERR("register failed %d", r);
-	else
-		DMINFO("version " DM_HP_HWH_VER " loaded");
-
-	return r;
-}
-
-static void __exit hp_sw_exit(void)
-{
-	int r;
-
-	r = dm_unregister_hw_handler(&hp_sw_hwh);
-	if (r < 0)
-		DMERR("unregister failed %d", r);
-}
-
-module_init(hp_sw_init);
-module_exit(hp_sw_exit);
-
-MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support");
-MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DM_HP_HWH_VER);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
deleted file mode 100644
index 95e77734880a..000000000000
--- a/drivers/md/dm-mpath-rdac.c
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * Engenio/LSI RDAC DM HW handler
- *
- * Copyright (C) 2005 Mike Christie. All rights reserved.
- * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-
-#define DM_MSG_PREFIX "multipath rdac"
-
-#include "dm.h"
-#include "dm-hw-handler.h"
-
-#define RDAC_DM_HWH_NAME "rdac"
-#define RDAC_DM_HWH_VER "0.4"
-
-/*
- * LSI mode page stuff
- *
- * These struct definitions and the forming of the
- * mode page were taken from the LSI RDAC 2.4 GPL'd
- * driver, and then converted to Linux conventions.
- */
-#define RDAC_QUIESCENCE_TIME 20;
-/*
- * Page Codes
- */
-#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
-
-/*
- * Controller modes definitions
- */
-#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
-#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02
-
-/*
- * RDAC Options field
- */
-#define RDAC_FORCED_QUIESENCE 0x02
-
-#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
-
-struct rdac_mode_6_hdr {
-	u8	data_len;
-	u8	medium_type;
-	u8	device_params;
-	u8	block_desc_len;
-};
-
-struct rdac_mode_10_hdr {
-	u16	data_len;
-	u8	medium_type;
-	u8	device_params;
-	u16	reserved;
-	u16	block_desc_len;
-};
-
-struct rdac_mode_common {
-	u8	controller_serial[16];
-	u8	alt_controller_serial[16];
-	u8	rdac_mode[2];
-	u8	alt_rdac_mode[2];
-	u8	quiescence_timeout;
-	u8	rdac_options;
-};
-
-struct rdac_pg_legacy {
-	struct rdac_mode_6_hdr hdr;
-	u8	page_code;
-	u8	page_len;
-	struct rdac_mode_common common;
-#define MODE6_MAX_LUN	32
-	u8	lun_table[MODE6_MAX_LUN];
-	u8	reserved2[32];
-	u8	reserved3;
-	u8	reserved4;
-};
-
-struct rdac_pg_expanded {
-	struct rdac_mode_10_hdr hdr;
-	u8	page_code;
-	u8	subpage_code;
-	u8	page_len[2];
-	struct rdac_mode_common common;
-	u8	lun_table[256];
-	u8	reserved3;
-	u8	reserved4;
-};
-
-struct c9_inquiry {
-	u8	peripheral_info;
-	u8	page_code;	/* 0xC9 */
-	u8	reserved1;
-	u8	page_len;
-	u8	page_id[4];	/* "vace" */
-	u8	avte_cvp;
-	u8	path_prio;
-	u8	reserved2[38];
-};
-
-#define SUBSYS_ID_LEN	16
-#define SLOT_ID_LEN	2
-
-struct c4_inquiry {
-	u8	peripheral_info;
-	u8	page_code;	/* 0xC4 */
-	u8	reserved1;
-	u8	page_len;
-	u8	page_id[4];	/* "subs" */
-	u8	subsys_id[SUBSYS_ID_LEN];
-	u8	revision[4];
-	u8	slot_id[SLOT_ID_LEN];
-	u8	reserved[2];
-};
-
-struct rdac_controller {
-	u8	subsys_id[SUBSYS_ID_LEN];
-	u8	slot_id[SLOT_ID_LEN];
-	int	use_10_ms;
-	struct kref kref;
-	struct list_head node; /* list of all controllers */
-	spinlock_t lock;
-	int submitted;
-	struct list_head cmd_list; /* list of commands to be submitted */
-	union {
-		struct rdac_pg_legacy legacy;
-		struct rdac_pg_expanded expanded;
-	} mode_select;
-};
-struct c8_inquiry {
-	u8	peripheral_info;
-	u8	page_code; /* 0xC8 */
-	u8	reserved1;
-	u8	page_len;
-	u8	page_id[4]; /* "edid" */
-	u8	reserved2[3];
-	u8	vol_uniq_id_len;
-	u8	vol_uniq_id[16];
-	u8	vol_user_label_len;
-	u8	vol_user_label[60];
-	u8	array_uniq_id_len;
-	u8	array_unique_id[16];
-	u8	array_user_label_len;
-	u8	array_user_label[60];
-	u8	lun[8];
-};
-
-struct c2_inquiry {
-	u8	peripheral_info;
-	u8	page_code;	/* 0xC2 */
-	u8	reserved1;
-	u8	page_len;
-	u8	page_id[4];	/* "swr4" */
-	u8	sw_version[3];
-	u8	sw_date[3];
-	u8	features_enabled;
-	u8	max_lun_supported;
-	u8	partitions[239]; /* Total allocation length should be 0xFF */
-};
-
-struct rdac_handler {
-	struct list_head entry; /* list waiting to submit MODE SELECT */
-	unsigned timeout;
-	struct rdac_controller *ctlr;
-#define UNINITIALIZED_LUN (1 << 8)
-	unsigned lun;
-	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-	struct dm_path *path;
-	struct work_struct work;
-#define	SEND_C2_INQUIRY		1
-#define	SEND_C4_INQUIRY		2
-#define	SEND_C8_INQUIRY		3
-#define	SEND_C9_INQUIRY		4
-#define	SEND_MODE_SELECT	5
-	int cmd_to_send;
-	union {
-		struct c2_inquiry c2;
-		struct c4_inquiry c4;
-		struct c8_inquiry c8;
-		struct c9_inquiry c9;
-	} inq;
-};
-
-static LIST_HEAD(ctlr_list);
-static DEFINE_SPINLOCK(list_lock);
-static struct workqueue_struct *rdac_wkqd;
-
-static inline int had_failures(struct request *req, int error)
-{
-	return (error || host_byte(req->errors) != DID_OK ||
-			msg_byte(req->errors) != COMMAND_COMPLETE);
-}
-
-static void rdac_resubmit_all(struct rdac_handler *h)
-{
-	struct rdac_controller *ctlr = h->ctlr;
-	struct rdac_handler *tmp, *h1;
-
-	spin_lock(&ctlr->lock);
-	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
-		h1->cmd_to_send = SEND_C9_INQUIRY;
-		queue_work(rdac_wkqd, &h1->work);
-		list_del(&h1->entry);
-	}
-	ctlr->submitted = 0;
-	spin_unlock(&ctlr->lock);
-}
-
-static void mode_select_endio(struct request *req, int error)
-{
-	struct rdac_handler *h = req->end_io_data;
-	struct scsi_sense_hdr sense_hdr;
-	int sense = 0, fail = 0;
-
-	if (had_failures(req, error)) {
-		fail = 1;
-		goto failed;
-	}
-
-	if (status_byte(req->errors) == CHECK_CONDITION) {
-		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
-				&sense_hdr);
-		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
-				sense_hdr.ascq;
-		/* If it is retryable failure, submit the c9 inquiry again */
-		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
-		    sense == 0x62900) {
-			/* 0x59136    - Command lock contention
-			 * 0x[6b]8b02 - Quiesense in progress or achieved
-			 * 0x62900    - Power On, Reset, or Bus Device Reset
-			 */
-			h->cmd_to_send = SEND_C9_INQUIRY;
-			queue_work(rdac_wkqd, &h->work);
-			goto done;
-		}
-		if (sense)
-			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
-						h->path->dev->name, sense);
-	}
-failed:
-	if (fail || sense)
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-	else
-		dm_pg_init_complete(h->path, 0);
-
-done:
-	rdac_resubmit_all(h);
-	__blk_put_request(req->q, req);
-}
-
-static struct request *get_rdac_req(struct rdac_handler *h,
-			void *buffer, unsigned buflen, int rw)
-{
-	struct request *rq;
-	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-	rq = blk_get_request(q, rw, GFP_KERNEL);
-
-	if (!rq) {
-		DMINFO("get_rdac_req: blk_get_request failed");
-		return NULL;
-	}
-
-	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
-		blk_put_request(rq);
-		DMINFO("get_rdac_req: blk_rq_map_kern failed");
-		return NULL;
-	}
-
-	rq->sense = h->sense;
-	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
-	rq->sense_len = 0;
-
-	rq->end_io_data = h;
-	rq->timeout = h->timeout;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-	return rq;
-}
-
-static struct request *rdac_failover_get(struct rdac_handler *h)
-{
-	struct request *rq;
-	struct rdac_mode_common *common;
-	unsigned data_size;
-
-	if (h->ctlr->use_10_ms) {
-		struct rdac_pg_expanded *rdac_pg;
-
-		data_size = sizeof(struct rdac_pg_expanded);
-		rdac_pg = &h->ctlr->mode_select.expanded;
-		memset(rdac_pg, 0, data_size);
-		common = &rdac_pg->common;
-		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
-		rdac_pg->subpage_code = 0x1;
-		rdac_pg->page_len[0] = 0x01;
-		rdac_pg->page_len[1] = 0x28;
-		rdac_pg->lun_table[h->lun] = 0x81;
-	} else {
-		struct rdac_pg_legacy *rdac_pg;
-
-		data_size = sizeof(struct rdac_pg_legacy);
-		rdac_pg = &h->ctlr->mode_select.legacy;
-		memset(rdac_pg, 0, data_size);
-		common = &rdac_pg->common;
-		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
-		rdac_pg->page_len = 0x68;
-		rdac_pg->lun_table[h->lun] = 0x81;
-	}
-	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
-	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
-	common->rdac_options = RDAC_FORCED_QUIESENCE;
-
-	/* get request for block layer packet command */
-	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
-	if (!rq) {
-		DMERR("rdac_failover_get: no rq");
-		return NULL;
-	}
-
-	/* Prepare the command. */
-	if (h->ctlr->use_10_ms) {
-		rq->cmd[0] = MODE_SELECT_10;
-		rq->cmd[7] = data_size >> 8;
-		rq->cmd[8] = data_size & 0xff;
-	} else {
-		rq->cmd[0] = MODE_SELECT;
-		rq->cmd[4] = data_size;
-	}
-	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-	return rq;
-}
-
-/* Acquires h->ctlr->lock */
-static void submit_mode_select(struct rdac_handler *h)
-{
-	struct request *rq;
-	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-	spin_lock(&h->ctlr->lock);
-	if (h->ctlr->submitted) {
-		list_add(&h->entry, &h->ctlr->cmd_list);
-		goto drop_lock;
-	}
-
-	if (!q) {
-		DMINFO("submit_mode_select: no queue");
-		goto fail_path;
-	}
-
-	rq = rdac_failover_get(h);
-	if (!rq) {
-		DMERR("submit_mode_select: no rq");
-		goto fail_path;
-	}
-
-	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
-
-	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
-	h->ctlr->submitted = 1;
-	goto drop_lock;
-fail_path:
-	dm_pg_init_complete(h->path, MP_FAIL_PATH);
-drop_lock:
-	spin_unlock(&h->ctlr->lock);
-}
-
-static void release_ctlr(struct kref *kref)
-{
-	struct rdac_controller *ctlr;
-	ctlr = container_of(kref, struct rdac_controller, kref);
-
-	spin_lock(&list_lock);
-	list_del(&ctlr->node);
-	spin_unlock(&list_lock);
-	kfree(ctlr);
-}
-
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
-{
-	struct rdac_controller *ctlr, *tmp;
-
-	spin_lock(&list_lock);
-
-	list_for_each_entry(tmp, &ctlr_list, node) {
-		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
-			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
-			kref_get(&tmp->kref);
-			spin_unlock(&list_lock);
-			return tmp;
-		}
-	}
-	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
-	if (!ctlr)
-		goto done;
-
-	/* initialize fields of controller */
-	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
-	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
-	kref_init(&ctlr->kref);
-	spin_lock_init(&ctlr->lock);
-	ctlr->submitted = 0;
-	ctlr->use_10_ms = -1;
-	INIT_LIST_HEAD(&ctlr->cmd_list);
-	list_add(&ctlr->node, &ctlr_list);
-done:
-	spin_unlock(&list_lock);
-	return ctlr;
-}
-
-static void c4_endio(struct request *req, int error)
-{
-	struct rdac_handler *h = req->end_io_data;
-	struct c4_inquiry *sp;
-
-	if (had_failures(req, error)) {
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-		goto done;
-	}
-
-	sp = &h->inq.c4;
-
-	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
-
-	if (h->ctlr) {
-		h->cmd_to_send = SEND_C9_INQUIRY;
-		queue_work(rdac_wkqd, &h->work);
-	} else
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-done:
-	__blk_put_request(req->q, req);
-}
-
-static void c2_endio(struct request *req, int error)
-{
-	struct rdac_handler *h = req->end_io_data;
-	struct c2_inquiry *sp;
-
-	if (had_failures(req, error)) {
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-		goto done;
-	}
-
-	sp = &h->inq.c2;
-
-	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
-	if (sp->max_lun_supported >= MODE6_MAX_LUN)
-		h->ctlr->use_10_ms = 1;
-	else
-		h->ctlr->use_10_ms = 0;
-
-	h->cmd_to_send = SEND_MODE_SELECT;
-	queue_work(rdac_wkqd, &h->work);
-done:
-	__blk_put_request(req->q, req);
-}
-
-static void c9_endio(struct request *req, int error)
-{
-	struct rdac_handler *h = req->end_io_data;
-	struct c9_inquiry *sp;
-
-	if (had_failures(req, error)) {
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-		goto done;
-	}
-
-	/* We need to look at the sense keys here to take clear action.
-	 * For now simple logic: If the host is in AVT mode or if controller
-	 * owns the lun, return dm_pg_init_complete(), otherwise submit
-	 * MODE SELECT.
-	 */
-	sp = &h->inq.c9;
-
-	/* If in AVT mode, return success */
-	if ((sp->avte_cvp >> 7) == 0x1) {
-		dm_pg_init_complete(h->path, 0);
-		goto done;
-	}
-
-	/* If the controller on this path owns the LUN, return success */
-	if (sp->avte_cvp & 0x1) {
-		dm_pg_init_complete(h->path, 0);
-		goto done;
-	}
-
-	if (h->ctlr) {
-		if (h->ctlr->use_10_ms == -1)
-			h->cmd_to_send = SEND_C2_INQUIRY;
-		else
-			h->cmd_to_send = SEND_MODE_SELECT;
-	} else
-		h->cmd_to_send = SEND_C4_INQUIRY;
-	queue_work(rdac_wkqd, &h->work);
-done:
-	__blk_put_request(req->q, req);
-}
-
-static void c8_endio(struct request *req, int error)
-{
-	struct rdac_handler *h = req->end_io_data;
-	struct c8_inquiry *sp;
-
-	if (had_failures(req, error)) {
-		dm_pg_init_complete(h->path, MP_FAIL_PATH);
-		goto done;
-	}
-
-	/* We need to look at the sense keys here to take clear action.
-	 * For now simple logic: Get the lun from the inquiry page.
-	 */
-	sp = &h->inq.c8;
-	h->lun = sp->lun[7]; /* currently it uses only one byte */
-	h->cmd_to_send = SEND_C9_INQUIRY;
-	queue_work(rdac_wkqd, &h->work);
-done:
-	__blk_put_request(req->q, req);
-}
-
-static void submit_inquiry(struct rdac_handler *h, int page_code,
-		unsigned int len, rq_end_io_fn endio)
-{
-	struct request *rq;
-	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
-
-	if (!q)
-		goto fail_path;
-
-	rq = get_rdac_req(h, &h->inq, len, READ);
-	if (!rq)
-		goto fail_path;
-
-	/* Prepare the command. */
-	rq->cmd[0] = INQUIRY;
-	rq->cmd[1] = 1;
-	rq->cmd[2] = page_code;
-	rq->cmd[4] = len;
-	rq->cmd_len = COMMAND_SIZE(INQUIRY);
-	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
-	return;
-
-fail_path:
-	dm_pg_init_complete(h->path, MP_FAIL_PATH);
-}
-
-static void service_wkq(struct work_struct *work)
-{
-	struct rdac_handler *h = container_of(work, struct rdac_handler, work);
-
-	switch (h->cmd_to_send) {
-	case SEND_C2_INQUIRY:
-		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
-		break;
-	case SEND_C4_INQUIRY:
-		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
-		break;
-	case SEND_C8_INQUIRY:
-		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
-		break;
-	case SEND_C9_INQUIRY:
-		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
-		break;
-	case SEND_MODE_SELECT:
-		submit_mode_select(h);
-		break;
-	default:
-		BUG();
-	}
-}
-/*
- * only support subpage2c until we confirm that this is just a matter of
- * of updating firmware or not, and RDAC (basic AVT works already) for now
- * but we can add these in in when we get time and testers
- */
-static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
-{
-	struct rdac_handler *h;
-	unsigned timeout;
-
-	if (argc == 0) {
-		/* No arguments: use defaults */
-		timeout = RDAC_FAILOVER_TIMEOUT;
-	} else if (argc != 1) {
-		DMWARN("incorrect number of arguments");
-		return -EINVAL;
-	} else {
-		if (sscanf(argv[1], "%u", &timeout) != 1) {
-			DMWARN("invalid timeout value");
-			return -EINVAL;
-		}
-	}
-
-	h = kzalloc(sizeof(*h), GFP_KERNEL);
-	if (!h)
-		return -ENOMEM;
-
-	hwh->context = h;
-	h->timeout = timeout;
-	h->lun = UNINITIALIZED_LUN;
-	INIT_WORK(&h->work, service_wkq);
-	DMWARN("using RDAC command with timeout %u", h->timeout);
-
-	return 0;
-}
-
-static void rdac_destroy(struct hw_handler *hwh)
-{
-	struct rdac_handler *h = hwh->context;
-
-	if (h->ctlr)
-		kref_put(&h->ctlr->kref, release_ctlr);
-	kfree(h);
-	hwh->context = NULL;
-}
-
-static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
-{
-	/* Try default handler */
-	return dm_scsi_err_handler(hwh, bio);
-}
-
-static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
-			struct dm_path *path)
-{
-	struct rdac_handler *h = hwh->context;
-
-	h->path = path;
-	switch (h->lun) {
-	case UNINITIALIZED_LUN:
-		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
-		break;
-	default:
-		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
-	}
-}
-
-static struct hw_handler_type rdac_handler = {
-	.name = RDAC_DM_HWH_NAME,
-	.module = THIS_MODULE,
-	.create = rdac_create,
-	.destroy = rdac_destroy,
-	.pg_init = rdac_pg_init,
-	.error = rdac_error,
-};
-
-static int __init rdac_init(void)
-{
-	int r;
-
-	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
-	if (!rdac_wkqd) {
-		DMERR("Failed to create workqueue rdac_wkqd.");
-		return -ENOMEM;
-	}
-
-	r = dm_register_hw_handler(&rdac_handler);
-	if (r < 0) {
-		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
-		destroy_workqueue(rdac_wkqd);
-		return r;
-	}
-
-	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
-	return 0;
-}
-
-static void __exit rdac_exit(void)
-{
-	int r = dm_unregister_hw_handler(&rdac_handler);
-
-	destroy_workqueue(rdac_wkqd);
-	if (r < 0)
-		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
-}
-
-module_init(rdac_init);
-module_exit(rdac_exit);
-
-MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
-MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(RDAC_DM_HWH_VER);
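With these dm modules gone, the same arrays are driven by the SCSI device-handler modules introduced earlier in this series. A minimal sketch of the switch-over on a running system (module names per those patches, assuming the handlers are built as modules):

	modprobe scsi_dh_emc	# replaces dm-emc.c ("emc")
	modprobe scsi_dh_rdac	# replaces dm-mpath-rdac.c ("rdac")
	modprobe scsi_dh_hp_sw	# replaces dm-mpath-hp-sw.c ("hp-sw")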