author		Chandra Seetharaman <sekharan@us.ibm.com>	2007-07-12 12:30:05 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 18:01:23 -0400
commit		dd172d72addefd89795e819cc2cc3eb1b9d12a7f (patch)
tree		203edc569ff9b1393af5a33ed62f6ceae5c5a4c7 /drivers/md/dm-mpath-rdac.c
parent		fc1ff9588a6d56258ff9576a31aa34f17757c666 (diff)
dm mpath: rdac
This patch supports LSI/Engenio devices in RDAC mode. Like dm-emc
it requires userspace support. In your multipath.conf file you must have:
path_checker rdac
hardware_handler "1 rdac"
prio_callout "/sbin/mpath_prio_tpc /dev/%n"
You must also have an updated multipath-tools release that includes
rdac support.
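For reference, these settings normally live together in a multipath.conf
devices stanza. The fragment below is only an illustrative sketch: the
vendor and product strings are placeholders and must be replaced with the
INQUIRY strings actually reported by your LSI/Engenio array.

	devices {
		device {
			vendor			"LSI"	# placeholder - match your array
			product			"*"	# placeholder - match your array
			hardware_handler	"1 rdac"
			path_checker		rdac
			prio_callout		"/sbin/mpath_prio_tpc /dev/%n"
		}
	}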
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/md/dm-mpath-rdac.c')
-rw-r--r--	drivers/md/dm-mpath-rdac.c	700
1 file changed, 700 insertions, 0 deletions
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
new file mode 100644
index 00000000000..8b776b8cb7f
--- /dev/null
+++ b/drivers/md/dm-mpath-rdac.c
@@ -0,0 +1,700 @@
/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm.h"
#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20;
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_FAILOVER_TIMEOUT (60 * HZ)

struct rdac_mode_6_hdr {
	u8 data_len;
	u8 medium_type;
	u8 device_params;
	u8 block_desc_len;
};

struct rdac_mode_10_hdr {
	u16 data_len;
	u8 medium_type;
	u8 device_params;
	u16 reserved;
	u16 block_desc_len;
};

struct rdac_mode_common {
	u8 controller_serial[16];
	u8 alt_controller_serial[16];
	u8 rdac_mode[2];
	u8 alt_rdac_mode[2];
	u8 quiescence_timeout;
	u8 rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8 page_code;
	u8 page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
	u8 lun_table[MODE6_MAX_LUN];
	u8 reserved2[32];
	u8 reserved3;
	u8 reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8 page_code;
	u8 subpage_code;
	u8 page_len[2];
	struct rdac_mode_common common;
	u8 lun_table[256];
	u8 reserved3;
	u8 reserved4;
};

struct c9_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC9 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "vace" */
	u8 avte_cvp;
	u8 path_prio;
	u8 reserved2[38];
};

#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2

struct c4_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC4 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "subs" */
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 revision[4];
	u8 slot_id[SLOT_ID_LEN];
	u8 reserved[2];
};

struct rdac_controller {
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 slot_id[SLOT_ID_LEN];
	int use_10_ms;
	struct kref kref;
	struct list_head node; /* list of all controllers */
	spinlock_t lock;
	int submitted;
	struct list_head cmd_list; /* list of commands to be submitted */
	union {
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
};
struct c8_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC8 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "edid" */
	u8 reserved2[3];
	u8 vol_uniq_id_len;
	u8 vol_uniq_id[16];
	u8 vol_user_label_len;
	u8 vol_user_label[60];
	u8 array_uniq_id_len;
	u8 array_unique_id[16];
	u8 array_user_label_len;
	u8 array_user_label[60];
	u8 lun[8];
};

struct c2_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC2 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "swr4" */
	u8 sw_version[3];
	u8 sw_date[3];
	u8 features_enabled;
	u8 max_lun_supported;
	u8 partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_handler {
	struct list_head entry; /* list waiting to submit MODE SELECT */
	unsigned timeout;
	struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
	unsigned lun;
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path *path;
	struct work_struct work;
#define SEND_C2_INQUIRY 1
#define SEND_C4_INQUIRY 2
#define SEND_C8_INQUIRY 3
#define SEND_C9_INQUIRY 4
#define SEND_MODE_SELECT 5
	int cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;

static inline int had_failures(struct request *req, int error)
{
	return (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE);
}

static void rdac_resubmit_all(struct rdac_handler *h)
{
	struct rdac_controller *ctlr = h->ctlr;
	struct rdac_handler *tmp, *h1;

	spin_lock(&ctlr->lock);
	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
		h1->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h1->work);
		list_del(&h1->entry);
	}
	ctlr->submitted = 0;
	spin_unlock(&ctlr->lock);
}

static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136 - Command lock contention
			 * 0x[6b]8b02 - Quiesense in progress or achieved
			 * 0x62900 - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
					h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}

static struct request *get_rdac_req(struct rdac_handler *h,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);

	if (!rq) {
		DMINFO("get_rdac_req: blk_get_request failed");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		DMINFO("get_rdac_req: blk_rq_map_kern failed");
		return NULL;
	}

	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->end_io_data = h;
	rq->timeout = h->timeout;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags = REQ_FAILFAST | REQ_NOMERGE;
	return rq;
}

static struct request *rdac_failover_get(struct rdac_handler *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_10_ms) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq) {
		DMERR("rdac_failover_get: no rq");
		return NULL;
	}

	/* Prepare the command. */
	if (h->ctlr->use_10_ms) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}

/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	spin_lock(&h->ctlr->lock);
	if (h->ctlr->submitted) {
		list_add(&h->entry, &h->ctlr->cmd_list);
		goto drop_lock;
	}

	if (!q) {
		DMINFO("submit_mode_select: no queue");
		goto fail_path;
	}

	rq = rdac_failover_get(h);
	if (!rq) {
		DMERR("submit_mode_select: no rq");
		goto fail_path;
	}

	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
	h->ctlr->submitted = 1;
	goto drop_lock;
fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
	spin_unlock(&h->ctlr->lock);
}

static void release_ctlr(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
		    (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	spin_lock_init(&ctlr->lock);
	ctlr->submitted = 0;
	ctlr->use_10_ms = -1;
	INIT_LIST_HEAD(&ctlr->cmd_list);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

static void c4_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c4_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c4;

	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

	if (h->ctlr) {
		h->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h->work);
	} else
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	__blk_put_request(req->q, req);
}

static void c2_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c2_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c2;

	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
	if (sp->max_lun_supported >= MODE6_MAX_LUN)
		h->ctlr->use_10_ms = 1;
	else
		h->ctlr->use_10_ms = 0;

	h->cmd_to_send = SEND_MODE_SELECT;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c9_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c9_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: If the host is in AVT mode or if controller
	 * owns the lun, return dm_pg_init_complete(), otherwise submit
	 * MODE SELECT.
	 */
	sp = &h->inq.c9;

	/* If in AVT mode, return success */
	if ((sp->avte_cvp >> 7) == 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	/* If the controller on this path owns the LUN, return success */
	if (sp->avte_cvp & 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	if (h->ctlr) {
		if (h->ctlr->use_10_ms == -1)
			h->cmd_to_send = SEND_C2_INQUIRY;
		else
			h->cmd_to_send = SEND_MODE_SELECT;
	} else
		h->cmd_to_send = SEND_C4_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c8_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c8_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: Get the lun from the inquiry page.
	 */
	sp = &h->inq.c8;
	h->lun = sp->lun[7]; /* currently it uses only one byte */
	h->cmd_to_send = SEND_C9_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void submit_inquiry(struct rdac_handler *h, int page_code,
		unsigned int len, rq_end_io_fn endio)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	if (!q)
		goto fail_path;

	rq = get_rdac_req(h, &h->inq, len, READ);
	if (!rq)
		goto fail_path;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);
	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	return;

fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
}

static void service_wkq(struct work_struct *work)
{
	struct rdac_handler *h = container_of(work, struct rdac_handler, work);

	switch (h->cmd_to_send) {
	case SEND_C2_INQUIRY:
		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
		break;
	case SEND_C4_INQUIRY:
		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
		break;
	case SEND_C8_INQUIRY:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	case SEND_C9_INQUIRY:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
		break;
	case SEND_MODE_SELECT:
		submit_mode_select(h);
		break;
	default:
		BUG();
	}
}
/*
 * only support subpage2c until we confirm that this is just a matter of
 * of updating firmware or not, and RDAC (basic AVT works already) for now
 * but we can add these in in when we get time and testers
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
	struct rdac_handler *h;
	unsigned timeout;

	if (argc == 0) {
		/* No arguments: use defaults */
		timeout = RDAC_FAILOVER_TIMEOUT;
	} else if (argc != 1) {
		DMWARN("incorrect number of arguments");
		return -EINVAL;
	} else {
		if (sscanf(argv[1], "%u", &timeout) != 1) {
			DMWARN("invalid timeout value");
			return -EINVAL;
		}
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hwh->context = h;
	h->timeout = timeout;
	h->lun = UNINITIALIZED_LUN;
	INIT_WORK(&h->work, service_wkq);
	DMWARN("using RDAC command with timeout %u", h->timeout);

	return 0;
}

static void rdac_destroy(struct hw_handler *hwh)
{
	struct rdac_handler *h = hwh->context;

	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_ctlr);
	kfree(h);
	hwh->context = NULL;
}

static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	/* Try default handler */
	return dm_scsi_err_handler(hwh, bio);
}

static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct dm_path *path)
{
	struct rdac_handler *h = hwh->context;

	h->path = path;
	switch (h->lun) {
	case UNINITIALIZED_LUN:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	default:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
	}
}

static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};

static int __init rdac_init(void)
{
	int r = dm_register_hw_handler(&rdac_handler);

	if (r < 0) {
		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
		return r;
	}

	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
	if (!rdac_wkqd) {
		DMERR("Failed to create workqueue rdac_wkqd.");
		dm_unregister_hw_handler(&rdac_handler);
		return -ENOMEM;
	}

	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
	return 0;
}

static void __exit rdac_exit(void)
{
	int r = dm_unregister_hw_handler(&rdac_handler);

	destroy_workqueue(rdac_wkqd);
	if (r < 0)
		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);