Diffstat (limited to 'drivers/md/dm-mpath-rdac.c')
-rw-r--r-- | drivers/md/dm-mpath-rdac.c | 700 |
1 file changed, 0 insertions, 700 deletions
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
deleted file mode 100644
index 95e77734880a..000000000000
--- a/drivers/md/dm-mpath-rdac.c
+++ /dev/null
@@ -1,700 +0,0 @@
/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm.h"
#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20;
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_FAILOVER_TIMEOUT (60 * HZ)

struct rdac_mode_6_hdr {
        u8 data_len;
        u8 medium_type;
        u8 device_params;
        u8 block_desc_len;
};

struct rdac_mode_10_hdr {
        u16 data_len;
        u8 medium_type;
        u8 device_params;
        u16 reserved;
        u16 block_desc_len;
};

struct rdac_mode_common {
        u8 controller_serial[16];
        u8 alt_controller_serial[16];
        u8 rdac_mode[2];
        u8 alt_rdac_mode[2];
        u8 quiescence_timeout;
        u8 rdac_options;
};

struct rdac_pg_legacy {
        struct rdac_mode_6_hdr hdr;
        u8 page_code;
        u8 page_len;
        struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
        u8 lun_table[MODE6_MAX_LUN];
        u8 reserved2[32];
        u8 reserved3;
        u8 reserved4;
};

struct rdac_pg_expanded {
        struct rdac_mode_10_hdr hdr;
        u8 page_code;
        u8 subpage_code;
        u8 page_len[2];
        struct rdac_mode_common common;
        u8 lun_table[256];
        u8 reserved3;
        u8 reserved4;
};

struct c9_inquiry {
        u8 peripheral_info;
        u8 page_code; /* 0xC9 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4]; /* "vace" */
        u8 avte_cvp;
        u8 path_prio;
        u8 reserved2[38];
};

#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2

struct c4_inquiry {
        u8 peripheral_info;
        u8 page_code; /* 0xC4 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4]; /* "subs" */
        u8 subsys_id[SUBSYS_ID_LEN];
        u8 revision[4];
        u8 slot_id[SLOT_ID_LEN];
        u8 reserved[2];
};

struct rdac_controller {
        u8 subsys_id[SUBSYS_ID_LEN];
        u8 slot_id[SLOT_ID_LEN];
        int use_10_ms;
        struct kref kref;
        struct list_head node; /* list of all controllers */
        spinlock_t lock;
        int submitted;
        struct list_head cmd_list; /* list of commands to be submitted */
        union {
                struct rdac_pg_legacy legacy;
                struct rdac_pg_expanded expanded;
        } mode_select;
};
struct c8_inquiry {
        u8 peripheral_info;
        u8 page_code; /* 0xC8 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4]; /* "edid" */
        u8 reserved2[3];
        u8 vol_uniq_id_len;
        u8 vol_uniq_id[16];
        u8 vol_user_label_len;
        u8 vol_user_label[60];
        u8 array_uniq_id_len;
        u8 array_unique_id[16];
        u8 array_user_label_len;
        u8 array_user_label[60];
        u8 lun[8];
};

struct c2_inquiry {
        u8 peripheral_info;
        u8 page_code; /* 0xC2 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4]; /* "swr4" */
        u8 sw_version[3];
        u8 sw_date[3];
        u8 features_enabled;
        u8 max_lun_supported;
        u8 partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_handler {
        struct list_head entry; /* list waiting to submit MODE SELECT */
        unsigned timeout;
        struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
        unsigned lun;
        unsigned char sense[SCSI_SENSE_BUFFERSIZE];
        struct dm_path *path;
        struct work_struct work;
#define SEND_C2_INQUIRY 1
#define SEND_C4_INQUIRY 2
#define SEND_C8_INQUIRY 3
#define SEND_C9_INQUIRY 4
#define SEND_MODE_SELECT 5
        int cmd_to_send;
        union {
                struct c2_inquiry c2;
                struct c4_inquiry c4;
                struct c8_inquiry c8;
                struct c9_inquiry c9;
        } inq;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;

static inline int had_failures(struct request *req, int error)
{
        return (error || host_byte(req->errors) != DID_OK ||
                        msg_byte(req->errors) != COMMAND_COMPLETE);
}

static void rdac_resubmit_all(struct rdac_handler *h)
{
        struct rdac_controller *ctlr = h->ctlr;
        struct rdac_handler *tmp, *h1;

        spin_lock(&ctlr->lock);
        list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
                h1->cmd_to_send = SEND_C9_INQUIRY;
                queue_work(rdac_wkqd, &h1->work);
                list_del(&h1->entry);
        }
        ctlr->submitted = 0;
        spin_unlock(&ctlr->lock);
}

static void mode_select_endio(struct request *req, int error)
{
        struct rdac_handler *h = req->end_io_data;
        struct scsi_sense_hdr sense_hdr;
        int sense = 0, fail = 0;

        if (had_failures(req, error)) {
                fail = 1;
                goto failed;
        }

        if (status_byte(req->errors) == CHECK_CONDITION) {
                scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
                                &sense_hdr);
                sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
                                sense_hdr.ascq;
                /* If it is retryable failure, submit the c9 inquiry again */
                if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
                                sense == 0x62900) {
                        /* 0x59136 - Command lock contention
                         * 0x[6b]8b02 - Quiesense in progress or achieved
                         * 0x62900 - Power On, Reset, or Bus Device Reset
                         */
                        h->cmd_to_send = SEND_C9_INQUIRY;
                        queue_work(rdac_wkqd, &h->work);
                        goto done;
                }
                if (sense)
                        DMINFO("MODE_SELECT failed on %s with sense 0x%x",
                                        h->path->dev->name, sense);
        }
failed:
        if (fail || sense)
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
        else
                dm_pg_init_complete(h->path, 0);

done:
        rdac_resubmit_all(h);
        __blk_put_request(req->q, req);
}

static struct request *get_rdac_req(struct rdac_handler *h,
                        void *buffer, unsigned buflen, int rw)
{
        struct request *rq;
        struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

        rq = blk_get_request(q, rw, GFP_KERNEL);

        if (!rq) {
                DMINFO("get_rdac_req: blk_get_request failed");
                return NULL;
        }

        if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
                blk_put_request(rq);
                DMINFO("get_rdac_req: blk_rq_map_kern failed");
                return NULL;
        }

        rq->sense = h->sense;
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;

        rq->end_io_data = h;
        rq->timeout = h->timeout;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
        return rq;
}

static struct request *rdac_failover_get(struct rdac_handler *h)
{
        struct request *rq;
        struct rdac_mode_common *common;
        unsigned data_size;

        if (h->ctlr->use_10_ms) {
                struct rdac_pg_expanded *rdac_pg;

                data_size = sizeof(struct rdac_pg_expanded);
                rdac_pg = &h->ctlr->mode_select.expanded;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
                rdac_pg->subpage_code = 0x1;
                rdac_pg->page_len[0] = 0x01;
                rdac_pg->page_len[1] = 0x28;
                rdac_pg->lun_table[h->lun] = 0x81;
        } else {
                struct rdac_pg_legacy *rdac_pg;

                data_size = sizeof(struct rdac_pg_legacy);
                rdac_pg = &h->ctlr->mode_select.legacy;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
                rdac_pg->page_len = 0x68;
                rdac_pg->lun_table[h->lun] = 0x81;
        }
        common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
        common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
        common->rdac_options = RDAC_FORCED_QUIESENCE;

        /* get request for block layer packet command */
        rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
        if (!rq) {
                DMERR("rdac_failover_get: no rq");
                return NULL;
        }

        /* Prepare the command. */
        if (h->ctlr->use_10_ms) {
                rq->cmd[0] = MODE_SELECT_10;
                rq->cmd[7] = data_size >> 8;
                rq->cmd[8] = data_size & 0xff;
        } else {
                rq->cmd[0] = MODE_SELECT;
                rq->cmd[4] = data_size;
        }
        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

        return rq;
}

/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
        struct request *rq;
        struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

        spin_lock(&h->ctlr->lock);
        if (h->ctlr->submitted) {
                list_add(&h->entry, &h->ctlr->cmd_list);
                goto drop_lock;
        }

        if (!q) {
                DMINFO("submit_mode_select: no queue");
                goto fail_path;
        }

        rq = rdac_failover_get(h);
        if (!rq) {
                DMERR("submit_mode_select: no rq");
                goto fail_path;
        }

        DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

        blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
        h->ctlr->submitted = 1;
        goto drop_lock;
fail_path:
        dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
        spin_unlock(&h->ctlr->lock);
}

static void release_ctlr(struct kref *kref)
{
        struct rdac_controller *ctlr;
        ctlr = container_of(kref, struct rdac_controller, kref);

        spin_lock(&list_lock);
        list_del(&ctlr->node);
        spin_unlock(&list_lock);
        kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
        struct rdac_controller *ctlr, *tmp;

        spin_lock(&list_lock);

        list_for_each_entry(tmp, &ctlr_list, node) {
                if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
                                (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
                        kref_get(&tmp->kref);
                        spin_unlock(&list_lock);
                        return tmp;
                }
        }
        ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
        if (!ctlr)
                goto done;

        /* initialize fields of controller */
        memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
        memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
        kref_init(&ctlr->kref);
        spin_lock_init(&ctlr->lock);
        ctlr->submitted = 0;
        ctlr->use_10_ms = -1;
        INIT_LIST_HEAD(&ctlr->cmd_list);
        list_add(&ctlr->node, &ctlr_list);
done:
        spin_unlock(&list_lock);
        return ctlr;
}

static void c4_endio(struct request *req, int error)
{
        struct rdac_handler *h = req->end_io_data;
        struct c4_inquiry *sp;

        if (had_failures(req, error)) {
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
                goto done;
        }

        sp = &h->inq.c4;

        h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

        if (h->ctlr) {
                h->cmd_to_send = SEND_C9_INQUIRY;
                queue_work(rdac_wkqd, &h->work);
        } else
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
        __blk_put_request(req->q, req);
}

static void c2_endio(struct request *req, int error)
{
        struct rdac_handler *h = req->end_io_data;
        struct c2_inquiry *sp;

        if (had_failures(req, error)) {
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
                goto done;
        }

        sp = &h->inq.c2;

        /* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
        if (sp->max_lun_supported >= MODE6_MAX_LUN)
                h->ctlr->use_10_ms = 1;
        else
                h->ctlr->use_10_ms = 0;

        h->cmd_to_send = SEND_MODE_SELECT;
        queue_work(rdac_wkqd, &h->work);
done:
        __blk_put_request(req->q, req);
}

static void c9_endio(struct request *req, int error)
{
        struct rdac_handler *h = req->end_io_data;
        struct c9_inquiry *sp;

        if (had_failures(req, error)) {
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
                goto done;
        }

        /* We need to look at the sense keys here to take clear action.
         * For now simple logic: If the host is in AVT mode or if controller
         * owns the lun, return dm_pg_init_complete(), otherwise submit
         * MODE SELECT.
         */
        sp = &h->inq.c9;

        /* If in AVT mode, return success */
        if ((sp->avte_cvp >> 7) == 0x1) {
                dm_pg_init_complete(h->path, 0);
                goto done;
        }

        /* If the controller on this path owns the LUN, return success */
        if (sp->avte_cvp & 0x1) {
                dm_pg_init_complete(h->path, 0);
                goto done;
        }

        if (h->ctlr) {
                if (h->ctlr->use_10_ms == -1)
                        h->cmd_to_send = SEND_C2_INQUIRY;
                else
                        h->cmd_to_send = SEND_MODE_SELECT;
        } else
                h->cmd_to_send = SEND_C4_INQUIRY;
        queue_work(rdac_wkqd, &h->work);
done:
        __blk_put_request(req->q, req);
}

static void c8_endio(struct request *req, int error)
{
        struct rdac_handler *h = req->end_io_data;
        struct c8_inquiry *sp;

        if (had_failures(req, error)) {
                dm_pg_init_complete(h->path, MP_FAIL_PATH);
                goto done;
        }

        /* We need to look at the sense keys here to take clear action.
         * For now simple logic: Get the lun from the inquiry page.
         */
        sp = &h->inq.c8;
        h->lun = sp->lun[7]; /* currently it uses only one byte */
        h->cmd_to_send = SEND_C9_INQUIRY;
        queue_work(rdac_wkqd, &h->work);
done:
        __blk_put_request(req->q, req);
}

static void submit_inquiry(struct rdac_handler *h, int page_code,
                unsigned int len, rq_end_io_fn endio)
{
        struct request *rq;
        struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

        if (!q)
                goto fail_path;

        rq = get_rdac_req(h, &h->inq, len, READ);
        if (!rq)
                goto fail_path;

        /* Prepare the command. */
        rq->cmd[0] = INQUIRY;
        rq->cmd[1] = 1;
        rq->cmd[2] = page_code;
        rq->cmd[4] = len;
        rq->cmd_len = COMMAND_SIZE(INQUIRY);
        blk_execute_rq_nowait(q, NULL, rq, 1, endio);
        return;

fail_path:
        dm_pg_init_complete(h->path, MP_FAIL_PATH);
}

static void service_wkq(struct work_struct *work)
{
        struct rdac_handler *h = container_of(work, struct rdac_handler, work);

        switch (h->cmd_to_send) {
        case SEND_C2_INQUIRY:
                submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
                break;
        case SEND_C4_INQUIRY:
                submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
                break;
        case SEND_C8_INQUIRY:
                submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
                break;
        case SEND_C9_INQUIRY:
                submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
                break;
        case SEND_MODE_SELECT:
                submit_mode_select(h);
                break;
        default:
                BUG();
        }
}
/*
 * only support subpage2c until we confirm that this is just a matter of
 * of updating firmware or not, and RDAC (basic AVT works already) for now
 * but we can add these in in when we get time and testers
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
        struct rdac_handler *h;
        unsigned timeout;

        if (argc == 0) {
                /* No arguments: use defaults */
                timeout = RDAC_FAILOVER_TIMEOUT;
        } else if (argc != 1) {
                DMWARN("incorrect number of arguments");
                return -EINVAL;
        } else {
                if (sscanf(argv[1], "%u", &timeout) != 1) {
                        DMWARN("invalid timeout value");
                        return -EINVAL;
                }
        }

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return -ENOMEM;

        hwh->context = h;
        h->timeout = timeout;
        h->lun = UNINITIALIZED_LUN;
        INIT_WORK(&h->work, service_wkq);
        DMWARN("using RDAC command with timeout %u", h->timeout);

        return 0;
}

static void rdac_destroy(struct hw_handler *hwh)
{
        struct rdac_handler *h = hwh->context;

        if (h->ctlr)
                kref_put(&h->ctlr->kref, release_ctlr);
        kfree(h);
        hwh->context = NULL;
}

static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
        /* Try default handler */
        return dm_scsi_err_handler(hwh, bio);
}

static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
                        struct dm_path *path)
{
        struct rdac_handler *h = hwh->context;

        h->path = path;
        switch (h->lun) {
        case UNINITIALIZED_LUN:
                submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
                break;
        default:
                submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
        }
}

static struct hw_handler_type rdac_handler = {
        .name = RDAC_DM_HWH_NAME,
        .module = THIS_MODULE,
        .create = rdac_create,
        .destroy = rdac_destroy,
        .pg_init = rdac_pg_init,
        .error = rdac_error,
};

static int __init rdac_init(void)
{
        int r;

        rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
        if (!rdac_wkqd) {
                DMERR("Failed to create workqueue rdac_wkqd.");
                return -ENOMEM;
        }

        r = dm_register_hw_handler(&rdac_handler);
        if (r < 0) {
                DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
                destroy_workqueue(rdac_wkqd);
                return r;
        }

        DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
        return 0;
}

static void __exit rdac_exit(void)
{
        int r = dm_unregister_hw_handler(&rdac_handler);

        destroy_workqueue(rdac_wkqd);
        if (r < 0)
                DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);