diff options
Diffstat (limited to 'drivers/scsi/snic/snic_scsi.c')
-rw-r--r-- | drivers/scsi/snic/snic_scsi.c | 2632 |
1 files changed, 2632 insertions, 0 deletions
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c new file mode 100644 index 000000000000..2c7b4c321cbe --- /dev/null +++ b/drivers/scsi/snic/snic_scsi.c | |||
@@ -0,0 +1,2632 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Cisco Systems, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you may redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; version 2 of the License. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
15 | * SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/mempool.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/gfp.h> | ||
26 | #include <scsi/scsi.h> | ||
27 | #include <scsi/scsi_host.h> | ||
28 | #include <scsi/scsi_device.h> | ||
29 | #include <scsi/scsi_cmnd.h> | ||
30 | #include <scsi/scsi_tcq.h> | ||
31 | #include <scsi/scsi_dbg.h> | ||
32 | |||
33 | #include "snic_io.h" | ||
34 | #include "snic.h" | ||
35 | |||
/*
 * snic_cmd_tag : block-layer tag of the request backing a scsi_cmnd.
 * This tag is used as the command id exchanged with the firmware and as
 * the index for the per-IO lock hash and scsi_host_find_tag() lookups.
 */
#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
37 | |||
/*
 * Printable names for the adapter states, indexed by state value.
 * Not static: referenced from other files of this driver as well
 * (e.g. snic_queuecommand prints it).
 */
const char *snic_state_str[] = {
	[SNIC_INIT] = "SNIC_INIT",
	[SNIC_ERROR] = "SNIC_ERROR",
	[SNIC_ONLINE] = "SNIC_ONLINE",
	[SNIC_OFFLINE] = "SNIC_OFFLINE",
	[SNIC_FWRESET] = "SNIC_FWRESET",
};
45 | |||
46 | static const char * const snic_req_state_str[] = { | ||
47 | [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED", | ||
48 | [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING", | ||
49 | [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING", | ||
50 | [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPELTE", | ||
51 | [SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING", | ||
52 | [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPELTE", | ||
53 | [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPELTE", | ||
54 | }; | ||
55 | |||
/*
 * snic cmd status strings: printable names for firmware completion codes,
 * indexed by SNIC_STAT_* value. Gaps (if any) fall back to "Unknown" via
 * snic_io_status_to_str().
 */
static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS] = "SNIC_STAT_IO_SUCCESS", /* 0x0 */
	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES] = "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED] = "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT] = "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR] = "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL] = "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN] = "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR] = "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY] = "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR",
};
79 | |||
80 | static void snic_scsi_cleanup(struct snic *, int); | ||
81 | |||
82 | const char * | ||
83 | snic_state_to_str(unsigned int state) | ||
84 | { | ||
85 | if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state]) | ||
86 | return "Unknown"; | ||
87 | |||
88 | return snic_state_str[state]; | ||
89 | } | ||
90 | |||
91 | static const char * | ||
92 | snic_io_status_to_str(unsigned int state) | ||
93 | { | ||
94 | if ((state >= ARRAY_SIZE(snic_io_status_str)) || | ||
95 | (!snic_io_status_str[state])) | ||
96 | return "Unknown"; | ||
97 | |||
98 | return snic_io_status_str[state]; | ||
99 | } | ||
100 | |||
101 | static const char * | ||
102 | snic_ioreq_state_to_str(unsigned int state) | ||
103 | { | ||
104 | if (state >= ARRAY_SIZE(snic_req_state_str) || | ||
105 | !snic_req_state_str[state]) | ||
106 | return "Unknown"; | ||
107 | |||
108 | return snic_req_state_str[state]; | ||
109 | } | ||
110 | |||
111 | static inline spinlock_t * | ||
112 | snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc) | ||
113 | { | ||
114 | u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1); | ||
115 | |||
116 | return &snic->io_req_lock[hash]; | ||
117 | } | ||
118 | |||
119 | static inline spinlock_t * | ||
120 | snic_io_lock_tag(struct snic *snic, int tag) | ||
121 | { | ||
122 | return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)]; | ||
123 | } | ||
124 | |||
/*
 * snic_release_req_buf : Releases snic_req_info and its DMA mappings.
 *
 * Unmaps the sense buffer (mapped at icmnd-queue time) and the command's
 * data SG list, then returns the request block to the pool. The IO must
 * already be marked complete/terminated — freeing an in-flight command
 * would let the firmware DMA into freed memory (enforced by SNIC_BUG_ON).
 * Caller must NOT hold the per-tag io_lock.
 */
static void
snic_release_req_buf(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	/* sense_addr is only set when snic_queue_icmnd_req mapped it */
	if (req->u.icmnd.sense_addr)
		pci_unmap_single(snic->pdev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 PCI_DMA_FROMDEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
} /* end of snic_release_req_buf */
158 | |||
/*
 * snic_queue_icmnd_req : Builds an snic_icmnd request and queues it to FW.
 *
 * Copies the DMA-mapped data SG list into the request's extended SGL,
 * DMA-maps the sense buffer, fills in the icmnd payload and posts the
 * descriptor on the work queue.
 *
 * Returns 0 on success, -ENOMEM if the sense-buffer mapping fails, or the
 * error from snic_queue_wq_desc(). On queuing failure the sense mapping is
 * presumably torn down later by snic_release_req_buf() in the caller's
 * error path (it unmaps whatever sense_addr was recorded) — TODO confirm.
 */
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	/* Copy the already-mapped data SG entries into the FW SGL (LE format) */
	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		}
	}

	/* Map the sense buffer so FW can deposit sense data directly */
	pa = pci_map_single(snic->pdev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid, /* hid */
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	/* Post the descriptor; FW owns the request from here on success */
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);

	return ret;
} /* end of snic_queue_icmnd_req */
232 | |||
/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 *
 * DMA-maps the command's data, allocates a snic_req_info, links it to the
 * scsi_cmnd (CMD_SP) and queues the icmnd to the firmware. On queuing
 * failure the link is severed under the per-tag io_lock (the completion
 * path may race with us once the descriptor was attempted) and all
 * resources are released. Returns 0 on success or -ENOMEM/queue error.
 */
static int
snic_issue_scsi_req(struct snic *snic,
		    struct snic_tgt *tgt,
		    struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;

	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));

		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		/* undo the data mapping before bailing out */
		scsi_dma_unmap(sc);
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi->tgt_id = tgt->id;
	rqi->sc = sc;

	/*
	 * Mark the command issued BEFORE queuing: once the descriptor is
	 * posted, the completion handler may run immediately and expects
	 * CMD_SP/state to be set.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);

	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);

		/* sever the sc<->rqi link under the lock to close the race
		 * with any completion that might still reference it */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);

		if (rqi)
			snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		/* success path: only update statistics/trace */
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;

		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);

		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);

		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}

issue_sc_end:

	return ret;
} /* end of snic_issue_scsi_req */
322 | |||
323 | |||
/*
 * snic_queuecommand
 * Routine to send a scsi cdb to LLD
 * Called with host_lock held and interrupts disabled
 *
 * Returns 0 when the command was accepted (or completed with an error via
 * scsi_done for a not-ready target), or SCSI_MLQUEUE_HOST_BUSY to ask the
 * midlayer to retry when the adapter is not online or queuing failed.
 */
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;

	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		/* target not ready: fail the command back immediately */
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		sc->scsi_done(sc);

		return 0;
	}

	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);

		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* ios_inflight brackets the issue path, not the IO lifetime */
	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);

	/* reset per-command driver state (CMD_STATE/CMD_FLAGS/CMD_SP) */
	memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));

	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	} else
		snic_stats_update_active_ios(&snic->s_stats);

	atomic_dec(&snic->ios_inflight);

	return ret;
} /* end of snic_queuecommand */
371 | |||
372 | /* | ||
373 | * snic_process_abts_pending_state: | ||
374 | * caller should hold IO lock | ||
375 | */ | ||
376 | static void | ||
377 | snic_proc_tmreq_pending_state(struct snic *snic, | ||
378 | struct scsi_cmnd *sc, | ||
379 | u8 cmpl_status) | ||
380 | { | ||
381 | int state = CMD_STATE(sc); | ||
382 | |||
383 | if (state == SNIC_IOREQ_ABTS_PENDING) | ||
384 | CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING; | ||
385 | else if (state == SNIC_IOREQ_LR_PENDING) | ||
386 | CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING; | ||
387 | else | ||
388 | SNIC_BUG_ON(1); | ||
389 | |||
390 | switch (cmpl_status) { | ||
391 | case SNIC_STAT_IO_SUCCESS: | ||
392 | CMD_FLAGS(sc) |= SNIC_IO_DONE; | ||
393 | break; | ||
394 | |||
395 | case SNIC_STAT_ABORTED: | ||
396 | CMD_FLAGS(sc) |= SNIC_IO_ABORTED; | ||
397 | break; | ||
398 | |||
399 | default: | ||
400 | SNIC_BUG_ON(1); | ||
401 | } | ||
402 | } | ||
403 | |||
/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 *
 * Translates a firmware completion status into a SCSI midlayer host byte
 * (DID_*), bumps the matching statistics counter, and stores the combined
 * result in sc->result. Note the SCSI_ERR case intentionally leaves res at
 * DID_OK (0) so only the device's scsi_status is reported.
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID: /* Req was aborted to due to sgl error*/
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;

	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */
480 | |||
481 | /* | ||
482 | * snic_tmreq_pending : is task management in progress. | ||
483 | */ | ||
484 | static int | ||
485 | snic_tmreq_pending(struct scsi_cmnd *sc) | ||
486 | { | ||
487 | int state = CMD_STATE(sc); | ||
488 | |||
489 | return ((state == SNIC_IOREQ_ABTS_PENDING) || | ||
490 | (state == SNIC_IOREQ_LR_PENDING)); | ||
491 | } | ||
492 | |||
/*
 * snic_process_icmnd_cmpl_status:
 * Caller should hold io_lock
 *
 * Marks the IO complete and fills sc->result from the firmware completion.
 * Returns 0 on success, 1 when the IO failed (so the caller can log the
 * command).
 */
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;

	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;

	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;

		xfer_len = scsi_bufflen(sc);

		/* Update SCSI Cmd with resid value */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));

		/* account for short transfers reported by the firmware */
		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);

		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}

	return ret;
} /* end of snic_process_icmnd_cmpl_status */
538 | |||
539 | |||
/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 *
 * Decodes the firmware completion header, looks up the scsi_cmnd by tag,
 * validates the rqi link, and either defers to the pending TM path or
 * completes the command back to the midlayer and frees the request.
 */
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_host_req *req = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,i ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* firmware-supplied tag must be within our tag space */
	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl:lun %lld sc %p cmd %xtag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
	/*
	 * NOTE(review): 'req' is never assigned before this check, so it is
	 * always NULL here and this WARN can never fire — possibly 'rqi' was
	 * intended. Left as-is; confirm against driver history.
	 */
	WARN_ON_ONCE(req);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));
		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, sc->cmnd[0], cmnd_id,
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* For now, consider only successful IO. */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));


	if (sc->scsi_done)
		sc->scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */
678 | |||
/*
 * snic_proc_dr_cmpl_locked : handles a device-reset (LUN reset) completion.
 *
 * Records the completion status, and unless an abort is pending on the
 * reset or the reset already timed out, marks the reset complete and wakes
 * the waiter (rqi->dr_done). Caller holds the per-tag io_lock.
 */
static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;

	CMD_LR_STATUS(sc) = cmpl_stat;

	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* a terminate is outstanding on this reset: defer to the abort path */
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}


	/* late completion after the reset timed out: nobody is waiting */
	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));

	/* wake the scsi_eh thread blocked on this reset, if any */
	if (rqi->dr_done)
		complete(rqi->dr_done);
} /* end of snic_proc_dr_cmpl_locked */
737 | |||
738 | /* | ||
739 | * snic_update_abort_stats : Updates abort stats based on completion status. | ||
740 | */ | ||
741 | static void | ||
742 | snic_update_abort_stats(struct snic *snic, u8 cmpl_stat) | ||
743 | { | ||
744 | struct snic_abort_stats *abt_stats = &snic->s_stats.abts; | ||
745 | |||
746 | SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n"); | ||
747 | |||
748 | switch (cmpl_stat) { | ||
749 | case SNIC_STAT_IO_SUCCESS: | ||
750 | break; | ||
751 | |||
752 | case SNIC_STAT_TIMEOUT: | ||
753 | atomic64_inc(&abt_stats->fw_tmo); | ||
754 | break; | ||
755 | |||
756 | case SNIC_STAT_IO_NOT_FOUND: | ||
757 | atomic64_inc(&abt_stats->io_not_found); | ||
758 | break; | ||
759 | |||
760 | default: | ||
761 | atomic64_inc(&abt_stats->fail); | ||
762 | break; | ||
763 | } | ||
764 | } | ||
765 | |||
/*
 * snic_process_itmf_cmpl : dispatches a task-management completion.
 *
 * The TM kind is encoded in the high bits of cmnd_id (tm_tags); the low
 * bits (SNIC_TAG_MASK) are the original command tag. Handles abort,
 * device-reset, and abort-of-device-reset completions. Takes and releases
 * the per-tag io_lock internally. Returns 0 on success, -1 on a late or
 * unrecognized completion.
 */
static int
snic_process_itmf_cmpl(struct snic *snic,
		       struct snic_fw_req *fwreq,
		       u32 cmnd_id,
		       u8 cmpl_stat,
		       struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	u32 tm_tags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		return ret;
	}

	/* Extract task management flags */
	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);

	start_time = rqi->start_time;
	cmnd_id &= (SNIC_TAG_MASK);

	switch (tm_tags) {
	case SNIC_TAG_ABORT:
		/* Abort only issued on cmd */
		snic_update_abort_stats(snic, cmpl_stat);

		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it. */
			ret = -1;
			spin_unlock_irqrestore(io_lock, flags);
			break;
		}

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
			      cmnd_id,
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		/*
		 * If scsi_eh thread is blocked waiting for abts complete,
		 * signal completion to it. IO will be cleaned in the thread,
		 * else clean it in this context.
		 */
		if (rqi->abts_done) {
			complete(rqi->abts_done);
			spin_unlock_irqrestore(io_lock, flags);

			break; /* jump out */
		}

		/* no waiter: finish the command here */
		CMD_SP(sc) = NULL;
		sc->result = (DID_ERROR << 16);
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
			      sc, CMD_FLAGS(sc));

		spin_unlock_irqrestore(io_lock, flags);

		snic_release_req_buf(snic, rqi, sc);

		if (sc->scsi_done) {
			SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
				 jiffies_to_msecs(jiffies - start_time),
				 (ulong) fwreq, SNIC_TRC_CMD(sc),
				 SNIC_TRC_CMD_STATE_FLAGS(sc));

			sc->scsi_done(sc);
		}

		break;

	case SNIC_TAG_DEV_RST:
	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
		spin_unlock_irqrestore(io_lock, flags);
		ret = 0;

		break;

	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
		/* Abort and terminate completion of device reset req */

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
			      cmnd_id, snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		if (rqi->abts_done)
			complete(rqi->abts_done);

		spin_unlock_irqrestore(io_lock, flags);

		break;

	default:
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);

		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      cmnd_id,
			      CMD_FLAGS(sc));
		ret = -1;
		SNIC_BUG_ON(1);

		break;
	}

	return ret;
} /* end of snic_process_itmf_cmpl_status */
903 | |||
/*
 * snic_itmf_cmpl_handler.
 * Routine to handle itmf completions.
 *
 * Decodes the fw response header, resolves the scsi_cmnd the task
 * management completion belongs to, and hands it to
 * snic_process_itmf_cmpl(). Two lookup paths exist:
 *  - device reset issued via ioctl: no host tag, so the request context
 *    (ctx) carries the snic_req_info pointer and sc is taken from it;
 *  - normal path: the command is found by tag via scsi_host_find_tag().
 */
static void
snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	struct snic_itmf_cmpl *itmf_cmpl = NULL;
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n",
		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);

	itmf_cmpl = &fwreq->u.itmf_cmpl;
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: nterm %u , flags 0x%x\n",
		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);

	/*
	 * Special case: dev reset issued through ioctl. There is no tag to
	 * look up; ctx holds the request info directly.
	 * NOTE(review): ctx is trusted to be a valid rqi here — presumably
	 * guaranteed by the issuing path; confirm against the ioctl code.
	 */
	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;

		goto ioctl_dev_rst;
	}

	/* Reject tags outside the host's tag space before the lookup. */
	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);

ioctl_dev_rst:
	if (!sc) {
		/* Count and log the orphan completion; nothing to do. */
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);

		return;
	}

	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
} /* end of snic_itmf_cmpl_handler */
962 | |||
963 | |||
964 | |||
965 | static void | ||
966 | snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc) | ||
967 | { | ||
968 | struct snic_stats *st = &snic->s_stats; | ||
969 | long act_ios = 0, act_fwreqs = 0; | ||
970 | |||
971 | SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n"); | ||
972 | snic_scsi_cleanup(snic, snic_cmd_tag(sc)); | ||
973 | |||
974 | /* Update stats on pending IOs */ | ||
975 | act_ios = atomic64_read(&st->io.active); | ||
976 | atomic64_add(act_ios, &st->io.compl); | ||
977 | atomic64_sub(act_ios, &st->io.active); | ||
978 | |||
979 | act_fwreqs = atomic64_read(&st->fw.actv_reqs); | ||
980 | atomic64_sub(act_fwreqs, &st->fw.actv_reqs); | ||
981 | } | ||
982 | |||
983 | /* | ||
984 | * snic_hba_reset_cmpl_handler : | ||
985 | * | ||
986 | * Notes : | ||
987 | * 1. Cleanup all the scsi cmds, release all snic specific cmds | ||
988 | * 2. Issue Report Targets in case of SAN targets | ||
989 | */ | ||
990 | static int | ||
991 | snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) | ||
992 | { | ||
993 | ulong ctx; | ||
994 | u32 cmnd_id; | ||
995 | u32 hid; | ||
996 | u8 typ; | ||
997 | u8 hdr_stat; | ||
998 | struct scsi_cmnd *sc = NULL; | ||
999 | struct snic_req_info *rqi = NULL; | ||
1000 | spinlock_t *io_lock = NULL; | ||
1001 | unsigned long flags, gflags; | ||
1002 | int ret = 0; | ||
1003 | |||
1004 | SNIC_HOST_INFO(snic->shost, | ||
1005 | "reset_cmpl:HBA Reset Completion received.\n"); | ||
1006 | |||
1007 | snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); | ||
1008 | SNIC_SCSI_DBG(snic->shost, | ||
1009 | "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", | ||
1010 | typ, hdr_stat, cmnd_id, hid, ctx); | ||
1011 | |||
1012 | /* spl case, host reset issued through ioctl */ | ||
1013 | if (cmnd_id == SCSI_NO_TAG) { | ||
1014 | rqi = (struct snic_req_info *) ctx; | ||
1015 | sc = rqi->sc; | ||
1016 | |||
1017 | goto ioctl_hba_rst; | ||
1018 | } | ||
1019 | |||
1020 | if (cmnd_id >= snic->max_tag_id) { | ||
1021 | SNIC_HOST_ERR(snic->shost, | ||
1022 | "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n", | ||
1023 | cmnd_id, snic_io_status_to_str(hdr_stat)); | ||
1024 | SNIC_BUG_ON(1); | ||
1025 | |||
1026 | return 1; | ||
1027 | } | ||
1028 | |||
1029 | sc = scsi_host_find_tag(snic->shost, cmnd_id); | ||
1030 | ioctl_hba_rst: | ||
1031 | if (!sc) { | ||
1032 | atomic64_inc(&snic->s_stats.io.sc_null); | ||
1033 | SNIC_HOST_ERR(snic->shost, | ||
1034 | "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n", | ||
1035 | snic_io_status_to_str(hdr_stat), cmnd_id); | ||
1036 | ret = 1; | ||
1037 | |||
1038 | return ret; | ||
1039 | } | ||
1040 | |||
1041 | io_lock = snic_io_lock_hash(snic, sc); | ||
1042 | spin_lock_irqsave(io_lock, flags); | ||
1043 | |||
1044 | if (!snic->remove_wait) { | ||
1045 | spin_unlock_irqrestore(io_lock, flags); | ||
1046 | SNIC_HOST_ERR(snic->shost, | ||
1047 | "reset_cmpl:host reset completed after timout\n"); | ||
1048 | ret = 1; | ||
1049 | |||
1050 | return ret; | ||
1051 | } | ||
1052 | |||
1053 | rqi = (struct snic_req_info *) CMD_SP(sc); | ||
1054 | WARN_ON_ONCE(!rqi); | ||
1055 | |||
1056 | if (!rqi) { | ||
1057 | atomic64_inc(&snic->s_stats.io.req_null); | ||
1058 | spin_unlock_irqrestore(io_lock, flags); | ||
1059 | CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; | ||
1060 | SNIC_HOST_ERR(snic->shost, | ||
1061 | "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n", | ||
1062 | snic_io_status_to_str(hdr_stat), cmnd_id, sc, | ||
1063 | CMD_FLAGS(sc)); | ||
1064 | |||
1065 | ret = 1; | ||
1066 | |||
1067 | return ret; | ||
1068 | } | ||
1069 | /* stats */ | ||
1070 | spin_unlock_irqrestore(io_lock, flags); | ||
1071 | |||
1072 | /* scsi cleanup */ | ||
1073 | snic_hba_reset_scsi_cleanup(snic, sc); | ||
1074 | |||
1075 | SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE && | ||
1076 | snic_get_state(snic) != SNIC_FWRESET); | ||
1077 | |||
1078 | /* Careful locking between snic_lock and io lock */ | ||
1079 | spin_lock_irqsave(io_lock, flags); | ||
1080 | spin_lock_irqsave(&snic->snic_lock, gflags); | ||
1081 | if (snic_get_state(snic) == SNIC_FWRESET) | ||
1082 | snic_set_state(snic, SNIC_ONLINE); | ||
1083 | spin_unlock_irqrestore(&snic->snic_lock, gflags); | ||
1084 | |||
1085 | if (snic->remove_wait) | ||
1086 | complete(snic->remove_wait); | ||
1087 | |||
1088 | spin_unlock_irqrestore(io_lock, flags); | ||
1089 | atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl); | ||
1090 | |||
1091 | ret = 0; | ||
1092 | /* Rediscovery is for SAN */ | ||
1093 | if (snic->config.xpt_type == SNIC_DAS) | ||
1094 | return ret; | ||
1095 | |||
1096 | SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n"); | ||
1097 | queue_work(snic_glob->event_q, &snic->disc_work); | ||
1098 | |||
1099 | return ret; | ||
1100 | } | ||
1101 | |||
/*
 * snic_msg_ack_handler: handler for SNIC_MSG_ACK firmware responses.
 * Message acks are not expected/implemented yet; log and assert.
 */
static void
snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");

	/* Intentional: receiving an ack is currently an unimplemented path. */
	SNIC_ASSERT_NOT_IMPL(1);
}
1109 | |||
1110 | static void | ||
1111 | snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq) | ||
1112 | { | ||
1113 | u8 typ, hdr_stat; | ||
1114 | u32 cmnd_id, hid; | ||
1115 | ulong ctx; | ||
1116 | struct snic_async_evnotify *aen = &fwreq->u.async_ev; | ||
1117 | u32 event_id = 0; | ||
1118 | |||
1119 | snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); | ||
1120 | SNIC_SCSI_DBG(snic->shost, | ||
1121 | "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", | ||
1122 | typ, hdr_stat, cmnd_id, hid, ctx); | ||
1123 | |||
1124 | event_id = le32_to_cpu(aen->ev_id); | ||
1125 | |||
1126 | switch (event_id) { | ||
1127 | case SNIC_EV_TGT_OFFLINE: | ||
1128 | SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n"); | ||
1129 | break; | ||
1130 | |||
1131 | case SNIC_EV_TGT_ONLINE: | ||
1132 | SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n"); | ||
1133 | break; | ||
1134 | |||
1135 | case SNIC_EV_LUN_OFFLINE: | ||
1136 | SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n"); | ||
1137 | break; | ||
1138 | |||
1139 | case SNIC_EV_LUN_ONLINE: | ||
1140 | SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n"); | ||
1141 | break; | ||
1142 | |||
1143 | case SNIC_EV_CONF_CHG: | ||
1144 | SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n"); | ||
1145 | break; | ||
1146 | |||
1147 | case SNIC_EV_TGT_ADDED: | ||
1148 | SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n"); | ||
1149 | break; | ||
1150 | |||
1151 | case SNIC_EV_TGT_DELTD: | ||
1152 | SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n"); | ||
1153 | break; | ||
1154 | |||
1155 | case SNIC_EV_LUN_ADDED: | ||
1156 | SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n"); | ||
1157 | break; | ||
1158 | |||
1159 | case SNIC_EV_LUN_DELTD: | ||
1160 | SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n"); | ||
1161 | break; | ||
1162 | |||
1163 | case SNIC_EV_DISC_CMPL: | ||
1164 | SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n"); | ||
1165 | break; | ||
1166 | |||
1167 | default: | ||
1168 | SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n"); | ||
1169 | SNIC_BUG_ON(1); | ||
1170 | break; | ||
1171 | } | ||
1172 | |||
1173 | SNIC_ASSERT_NOT_IMPL(1); | ||
1174 | } /* end of snic_aen_handler */ | ||
1175 | |||
1176 | /* | ||
1177 | * snic_io_cmpl_handler | ||
1178 | * Routine to process CQ entries(IO Completions) posted by fw. | ||
1179 | */ | ||
1180 | static int | ||
1181 | snic_io_cmpl_handler(struct vnic_dev *vdev, | ||
1182 | unsigned int cq_idx, | ||
1183 | struct snic_fw_req *fwreq) | ||
1184 | { | ||
1185 | struct snic *snic = svnic_dev_priv(vdev); | ||
1186 | u64 start = jiffies, cmpl_time; | ||
1187 | |||
1188 | snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq)); | ||
1189 | |||
1190 | /* Update FW Stats */ | ||
1191 | if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) && | ||
1192 | (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL)) | ||
1193 | atomic64_dec(&snic->s_stats.fw.actv_reqs); | ||
1194 | |||
1195 | SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) && | ||
1196 | (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY)); | ||
1197 | |||
1198 | /* Check for snic subsys errors */ | ||
1199 | switch (fwreq->hdr.status) { | ||
1200 | case SNIC_STAT_NOT_READY: /* XPT yet to initialize */ | ||
1201 | SNIC_HOST_ERR(snic->shost, | ||
1202 | "sNIC SubSystem is NOT Ready.\n"); | ||
1203 | break; | ||
1204 | |||
1205 | case SNIC_STAT_FATAL_ERROR: /* XPT Error */ | ||
1206 | SNIC_HOST_ERR(snic->shost, | ||
1207 | "sNIC SubSystem in Unrecoverable State.\n"); | ||
1208 | break; | ||
1209 | } | ||
1210 | |||
1211 | switch (fwreq->hdr.type) { | ||
1212 | case SNIC_RSP_EXCH_VER_CMPL: | ||
1213 | snic_io_exch_ver_cmpl_handler(snic, fwreq); | ||
1214 | break; | ||
1215 | |||
1216 | case SNIC_RSP_REPORT_TGTS_CMPL: | ||
1217 | snic_report_tgt_cmpl_handler(snic, fwreq); | ||
1218 | break; | ||
1219 | |||
1220 | case SNIC_RSP_ICMND_CMPL: | ||
1221 | snic_icmnd_cmpl_handler(snic, fwreq); | ||
1222 | break; | ||
1223 | |||
1224 | case SNIC_RSP_ITMF_CMPL: | ||
1225 | snic_itmf_cmpl_handler(snic, fwreq); | ||
1226 | break; | ||
1227 | |||
1228 | case SNIC_RSP_HBA_RESET_CMPL: | ||
1229 | snic_hba_reset_cmpl_handler(snic, fwreq); | ||
1230 | break; | ||
1231 | |||
1232 | case SNIC_MSG_ACK: | ||
1233 | snic_msg_ack_handler(snic, fwreq); | ||
1234 | break; | ||
1235 | |||
1236 | case SNIC_MSG_ASYNC_EVNOTIFY: | ||
1237 | snic_aen_handler(snic, fwreq); | ||
1238 | break; | ||
1239 | |||
1240 | default: | ||
1241 | SNIC_BUG_ON(1); | ||
1242 | SNIC_SCSI_DBG(snic->shost, | ||
1243 | "Unknown Firmwqre completion request type %d\n", | ||
1244 | fwreq->hdr.type); | ||
1245 | break; | ||
1246 | } | ||
1247 | |||
1248 | /* Update Stats */ | ||
1249 | cmpl_time = jiffies - start; | ||
1250 | if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time)) | ||
1251 | atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time); | ||
1252 | |||
1253 | return 0; | ||
1254 | } /* end of snic_io_cmpl_handler */ | ||
1255 | |||
1256 | /* | ||
1257 | * snic_fwcq_cmpl_handler | ||
1258 | * Routine to process fwCQ | ||
1259 | * This CQ is independent, and not associated with wq/rq/wq_copy queues | ||
1260 | */ | ||
1261 | int | ||
1262 | snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work) | ||
1263 | { | ||
1264 | unsigned int num_ent = 0; /* number cq entries processed */ | ||
1265 | unsigned int cq_idx; | ||
1266 | unsigned int nent_per_cq; | ||
1267 | struct snic_misc_stats *misc_stats = &snic->s_stats.misc; | ||
1268 | |||
1269 | for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) { | ||
1270 | nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx], | ||
1271 | snic_io_cmpl_handler, | ||
1272 | io_cmpl_work); | ||
1273 | num_ent += nent_per_cq; | ||
1274 | |||
1275 | if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents)) | ||
1276 | atomic64_set(&misc_stats->max_cq_ents, nent_per_cq); | ||
1277 | } | ||
1278 | |||
1279 | return num_ent; | ||
1280 | } /* end of snic_fwcq_cmpl_handler */ | ||
1281 | |||
/*
 * snic_queue_itmf_req: Common API to queue Task Management requests.
 * Use rqi->tm_tag for passing special tags.
 * @req_id : aborted request's tag, -1 for lun reset.
 *
 * Builds an ITMF host request (tag ORed with the special tm_tag bits,
 * LUN, target, tmf opcode) and posts it on the work queue. Returns 0 on
 * success or the error from snic_queue_wq_desc().
 */
static int
snic_queue_itmf_req(struct snic *snic,
		    struct snic_host_req *tmreq,
		    struct scsi_cmnd *sc,
		    u32 tmf,
		    u32 req_id)
{
	struct snic_req_info *rqi = req_to_rqi(tmreq);
	struct scsi_lun lun;
	/* Encode the special TM bits (abort/dev-reset) on top of the tag. */
	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
	int ret = 0;

	SNIC_BUG_ON(!rqi);
	SNIC_BUG_ON(!rqi->tm_tag);

	/* fill in lun info */
	int_to_scsilun(sc->device->lun, &lun);

	/* Initialize snic_host_req: itmf */
	snic_itmf_init(tmreq,
		       tm_tag,
		       snic->config.hid,
		       (ulong) rqi,
		       0 /* flags */,
		       req_id, /* Command to be aborted. */
		       rqi->tgt_id,
		       lun.scsi_lun,
		       tmf);

	/*
	 * In case of multiple aborts on same cmd,
	 * use try_wait_for_completion and completion_done() to check
	 * whether it queues aborts even after completion of abort issued
	 * prior.SNIC_BUG_ON(completion_done(&rqi->done));
	 */

	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
	else
		SNIC_SCSI_DBG(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));

	return ret;
} /* end of snic_queue_itmf_req */
1335 | |||
1336 | static int | ||
1337 | snic_issue_tm_req(struct snic *snic, | ||
1338 | struct snic_req_info *rqi, | ||
1339 | struct scsi_cmnd *sc, | ||
1340 | int tmf) | ||
1341 | { | ||
1342 | struct snic_host_req *tmreq = NULL; | ||
1343 | int req_id = 0, tag = snic_cmd_tag(sc); | ||
1344 | int ret = 0; | ||
1345 | |||
1346 | if (snic_get_state(snic) == SNIC_FWRESET) | ||
1347 | return -EBUSY; | ||
1348 | |||
1349 | atomic_inc(&snic->ios_inflight); | ||
1350 | |||
1351 | SNIC_SCSI_DBG(snic->shost, | ||
1352 | "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n", | ||
1353 | tmf, rqi, tag); | ||
1354 | |||
1355 | |||
1356 | if (tmf == SNIC_ITMF_LUN_RESET) { | ||
1357 | tmreq = snic_dr_req_init(snic, rqi); | ||
1358 | req_id = SCSI_NO_TAG; | ||
1359 | } else { | ||
1360 | tmreq = snic_abort_req_init(snic, rqi); | ||
1361 | req_id = tag; | ||
1362 | } | ||
1363 | |||
1364 | if (!tmreq) { | ||
1365 | ret = -ENOMEM; | ||
1366 | |||
1367 | goto tmreq_err; | ||
1368 | } | ||
1369 | |||
1370 | ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id); | ||
1371 | if (ret) | ||
1372 | goto tmreq_err; | ||
1373 | |||
1374 | ret = 0; | ||
1375 | |||
1376 | tmreq_err: | ||
1377 | if (ret) { | ||
1378 | SNIC_HOST_ERR(snic->shost, | ||
1379 | "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n", | ||
1380 | tmf, sc, rqi, req_id, tag, ret); | ||
1381 | } else { | ||
1382 | SNIC_SCSI_DBG(snic->shost, | ||
1383 | "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n", | ||
1384 | tmf, sc, rqi, req_id, tag); | ||
1385 | } | ||
1386 | |||
1387 | atomic_dec(&snic->ios_inflight); | ||
1388 | |||
1389 | return ret; | ||
1390 | } | ||
1391 | |||
/*
 * snic_queue_abort_req : Queues abort req to WQ
 *
 * Marks the request as an abort by setting SNIC_TAG_ABORT in tm_tag and
 * delegates to snic_issue_tm_req(). Returns its result (0 on success).
 */
static int
snic_queue_abort_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int tmf)
{
	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
		      sc, rqi, snic_cmd_tag(sc), tmf);

	/* Add special tag for abort */
	rqi->tm_tag |= SNIC_TAG_ABORT;

	return snic_issue_tm_req(snic, rqi, sc, tmf);
}
1409 | |||
/*
 * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
 *
 * Inspects the abort completion status recorded by the ITMF completion
 * path and translates it to the SCSI EH result (SUCCESS/FAILED). On a
 * firmware timeout (status still SNIC_INVALID_CODE) the request buffer is
 * deliberately NOT released, since the fw may still complete it later.
 */
static int
snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		/* Completion path already tore the request down. */
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;

		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
			      tag, sc, CMD_FLAGS(sc));
		ret = FAILED;

		goto abort_fail;
	}

	/* Nobody should wake us anymore; detach the completion. */
	rqi->abts_done = NULL;

	ret = FAILED;

	/* Check the abort status. */
	switch (CMD_ABTS_STATUS(sc)) {
	case SNIC_INVALID_CODE:
		/* Firmware didn't complete abort req, timedout */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
		atomic64_inc(&snic->s_stats.abts.drv_tmo);
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
		/* do not release snic request in timedout case */
		rqi = NULL;

		goto abort_fail;

	case SNIC_STAT_IO_SUCCESS:
	case SNIC_STAT_IO_NOT_FOUND:
		/* IO-not-found means the fw no longer has it: abort ok. */
		ret = SUCCESS;
		break;

	default:
		/* Firmware completed abort with error */
		ret = FAILED;
		break;
	}

	CMD_SP(sc) = NULL;
	SNIC_HOST_INFO(snic->shost,
		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
		       CMD_FLAGS(sc));

abort_fail:
	spin_unlock_irqrestore(io_lock, flags);
	/* rqi is NULL on the timeout path, so the buffer stays alive there. */
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	return ret;
} /* end of snic_abort_finish */
1478 | |||
/*
 * snic_send_abort_and_wait : Issues Abort, and Waits
 *
 * Picks ABTS_TASK or ABTS_TASK_TERM depending on target readiness/type,
 * queues the abort to firmware, then blocks (bounded by SNIC_ABTS_TIMEOUT)
 * on an on-stack completion that the ITMF completion handler signals.
 * Returns 0 when an abort was queued (or already pending), SUCCESS when
 * the IO had already completed, or FAILED when queuing failed.
 */
static int
snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	struct snic_tgt *tgt = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);

	/* SAN targets that are not ready get a terminate instead of abort. */
	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* stats */

	io_lock = snic_io_lock_hash(snic, sc);

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by fw_cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happend, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_lock
	 */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));

		ret = SUCCESS;

		goto send_abts_end;
	}

	/* Register our waiter before checking whether an abort is active. */
	rqi->abts_done = &tm_done;
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		/* An abort is already in flight; just wait for it. */
		spin_unlock_irqrestore(io_lock, flags);

		ret = 0;
		goto abts_pending;
	}
	SNIC_BUG_ON(!rqi->abts_done);

	/* Save Command State, should be restored on failed to Queue. */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it
	 * If the fw completes the command after this point,
	 * the completion won't be done till mid-layer, since abot
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now Queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore Command's previous state */
		CMD_STATE(sc) = sv_state;
		/* Re-read rqi: the completion path may have cleared CMD_SP. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_abts_end;
	}

	spin_lock_irqsave(io_lock, flags);
	if (tmf == SNIC_ITMF_ABTS_TASK) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
		atomic64_inc(&snic->s_stats.abts.num);
	} else {
		/* term stats */
		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
	}
	spin_unlock_irqrestore(io_lock, flags);

	SNIC_SCSI_DBG(snic->shost,
		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
		      sc, tag, CMD_FLAGS(sc));

	ret = 0;

abts_pending:
	/*
	 * Queued an abort IO, wait for its completion.
	 * Once the fw completes the abort command, it will
	 * wakeup this thread.
	 */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

send_abts_end:
	return ret;
} /* end of snic_send_abort_and_wait */
1601 | |||
1602 | /* | ||
1603 | * This function is exported to SCSI for sending abort cmnds. | ||
1604 | * A SCSI IO is represent by snic_ioreq in the driver. | ||
1605 | * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP'S IO | ||
1606 | */ | ||
1607 | int | ||
1608 | snic_abort_cmd(struct scsi_cmnd *sc) | ||
1609 | { | ||
1610 | struct snic *snic = shost_priv(sc->device->host); | ||
1611 | int ret = SUCCESS, tag = snic_cmd_tag(sc); | ||
1612 | u32 start_time = jiffies; | ||
1613 | |||
1614 | SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n", | ||
1615 | sc, sc->cmnd[0], sc->request, tag); | ||
1616 | |||
1617 | if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) { | ||
1618 | SNIC_HOST_ERR(snic->shost, | ||
1619 | "abt_cmd: tag %x Parent Devs are not rdy\n", | ||
1620 | tag); | ||
1621 | ret = FAST_IO_FAIL; | ||
1622 | |||
1623 | goto abort_end; | ||
1624 | } | ||
1625 | |||
1626 | |||
1627 | ret = snic_send_abort_and_wait(snic, sc); | ||
1628 | if (ret) | ||
1629 | goto abort_end; | ||
1630 | |||
1631 | ret = snic_abort_finish(snic, sc); | ||
1632 | |||
1633 | abort_end: | ||
1634 | SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, | ||
1635 | jiffies_to_msecs(jiffies - start_time), 0, | ||
1636 | SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); | ||
1637 | |||
1638 | SNIC_SCSI_DBG(snic->shost, | ||
1639 | "abts: Abort Req Status = %s\n", | ||
1640 | (ret == SUCCESS) ? "SUCCESS" : | ||
1641 | ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED")); | ||
1642 | |||
1643 | return ret; | ||
1644 | } | ||
1645 | |||
1646 | |||
1647 | |||
/*
 * snic_is_abts_pending: scan the whole tag space for IOs whose abort is
 * still pending with firmware.
 *
 * If @lr_sc is non-NULL, only commands on the same device (excluding
 * @lr_sc itself) are considered — i.e. the LUN under reset. Returns 1 as
 * soon as any matching IO is found in SNIC_IOREQ_ABTS_PENDING, else 0.
 */
static int
snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	struct scsi_device *lr_sdev = NULL;
	spinlock_t *io_lock = NULL;
	u32 tag;
	unsigned long flags;

	if (lr_sc)
		lr_sdev = lr_sc->device;

	/* walk through the tag map, and check if IOs are still pending in fw */
	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);

		/* Skip empty slots and (in LUN-reset mode) other devices. */
		if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		/*
		 * Found IO that is still pending w/ firmware and belongs to
		 * the LUN that is under reset, if lr_sc != NULL
		 */
		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);

			return 1;
		}

		spin_unlock_irqrestore(io_lock, flags);
	}

	return 0;
} /* end of snic_is_abts_pending */
1699 | |||
/*
 * snic_dr_clean_single_req: abort one outstanding IO on the LUN under
 * device reset, identified by @tag.
 *
 * Skips commands that don't belong to @lr_sdev, have no request info,
 * already have an abort pending, or are device resets not yet issued.
 * Otherwise it marks the IO ABTS_PENDING, queues an abort/terminate to
 * firmware, and waits (bounded) for the completion handler to signal it.
 * Returns 0 when the IO was cleaned (or needed no cleaning), 1 on failure
 * to queue the abort or when the abort is still pending after the wait.
 */
static int
snic_dr_clean_single_req(struct snic *snic,
			 u32 tag,
			 struct scsi_device *lr_sdev)
{
	struct snic_req_info *rqi = NULL;
	struct snic_tgt *tgt = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	u32 sv_state = 0, tmf = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0;

	io_lock = snic_io_lock_tag(snic, tag);
	spin_lock_irqsave(io_lock, flags);
	sc = scsi_host_find_tag(snic->shost, tag);

	/* Ignore Cmd that don't belong to Lun Reset device */
	if (!sc || sc->device != lr_sdev)
		goto skip_clean;

	rqi = (struct snic_req_info *) CMD_SP(sc);

	if (!rqi)
		goto skip_clean;

	/* Already being aborted elsewhere; nothing to do here. */
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_clean;

	/* A device reset that was never issued to fw needs no abort. */
	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req: devrst is not pending sc 0x%p\n",
			      sc);

		goto skip_clean;
	}

	SNIC_SCSI_DBG(snic->shost,
		      "clean_single_req: Found IO in %s on lun\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* Save Command State */
	sv_state = CMD_STATE(sc);

	/*
	 * Any pending IO issued prior to reset is expected to be
	 * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING
	 * to indicate the IO is abort pending.
	 * When IO is completed, the IO will be handed over and handled
	 * in this function.
	 */

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	SNIC_BUG_ON(rqi->abts_done);

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		rqi->tm_tag = SNIC_TAG_DEV_RST;

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req:devrst sc 0x%p\n", sc);
	}

	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	/* Arm the completion before dropping the lock and queuing. */
	rqi->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	/* SAN targets that aren't ready get terminate instead of abort. */
	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* Now queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Re-read rqi: the completion path may have torn it down. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;

		/* Restore Command State */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		ret = 1;
		goto skip_clean;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;

	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/* Blocks until the ITMF completion handler signals, or timeout. */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

	/* Recheck cmd state to check if it now aborted. */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		/* Request released by the completion path while we waited. */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		goto skip_clean;
	}
	rqi->abts_done = NULL;

	/* if abort is still pending w/ fw, fail */
	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
		ret = 1;

		goto skip_clean;
	}

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	snic_release_req_buf(snic, rqi, sc);

	ret = 0;

	return ret;

skip_clean:
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_dr_clean_single_req */
1842 | |||
1843 | static int | ||
1844 | snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc) | ||
1845 | { | ||
1846 | struct scsi_device *lr_sdev = lr_sc->device; | ||
1847 | u32 tag = 0; | ||
1848 | int ret = FAILED; | ||
1849 | |||
1850 | for (tag = 0; tag < snic->max_tag_id; tag++) { | ||
1851 | if (tag == snic_cmd_tag(lr_sc)) | ||
1852 | continue; | ||
1853 | |||
1854 | ret = snic_dr_clean_single_req(snic, tag, lr_sdev); | ||
1855 | if (ret) { | ||
1856 | SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag); | ||
1857 | |||
1858 | goto clean_err; | ||
1859 | } | ||
1860 | } | ||
1861 | |||
1862 | schedule_timeout(msecs_to_jiffies(100)); | ||
1863 | |||
1864 | /* Walk through all the cmds and check abts status. */ | ||
1865 | if (snic_is_abts_pending(snic, lr_sc)) { | ||
1866 | ret = FAILED; | ||
1867 | |||
1868 | goto clean_err; | ||
1869 | } | ||
1870 | |||
1871 | ret = 0; | ||
1872 | SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n"); | ||
1873 | |||
1874 | return ret; | ||
1875 | |||
1876 | clean_err: | ||
1877 | ret = FAILED; | ||
1878 | SNIC_HOST_ERR(snic->shost, | ||
1879 | "Failed to Clean Pending IOs on %s device.\n", | ||
1880 | dev_name(&lr_sdev->sdev_gendev)); | ||
1881 | |||
1882 | return ret; | ||
1883 | |||
1884 | } /* end of snic_dr_clean_pending_req */ | ||
1885 | |||
/*
 * snic_dr_finish : Called by snic_device_reset
 *
 * Examines the LUN reset status recorded by the completion path
 * (CMD_LR_STATUS), and on success cleans up every other outstanding IO
 * on the LUN. Returns SUCCESS, FAILED, or 0 (treated as success by the
 * caller's logging).
 */
static int
snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int lr_res = 0;
	int ret = FAILED;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		/* Request already torn down elsewhere; nothing to finish. */
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));

		ret = FAILED;
		goto dr_fini_end;
	}

	/* The wait in snic_send_dr_and_wait() is over; drop the waiter. */
	rqi->dr_done = NULL;

	lr_res = CMD_LR_STATUS(sc);

	switch (lr_res) {
	case SNIC_INVALID_CODE:
		/*
		 * Status never updated by the completion path: the LUN
		 * reset timed out. (stats)
		 */
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
			      snic_cmd_tag(sc), CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
		ret = FAILED;

		/* NOTE: dr_failed is entered with io_lock still held. */
		goto dr_failed;

	case SNIC_STAT_IO_SUCCESS:
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset cmpl\n",
			      snic_cmd_tag(sc));
		ret = 0;
		break;

	default:
		SNIC_HOST_ERR(snic->shost,
			      "dr_fini:Device Reset completed& failed.Tag = %x lr_status %s flags 0x%llx\n",
			      snic_cmd_tag(sc),
			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
		ret = FAILED;
		goto dr_failed;
	}
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Cleanup any IOs on this LUN that have still not completed.
	 * If any of these fail, then LUN Reset fails.
	 * Cleanup cleans all commands on this LUN except
	 * the lun reset command. If all cmds get cleaned, the LUN Reset
	 * succeeds.
	 */

	ret = snic_dr_clean_pending_req(snic, sc);
	if (ret) {
		/* Reacquire the lock: dr_failed below expects it held. */
		spin_lock_irqsave(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
			      snic_cmd_tag(sc));
		rqi = (struct snic_req_info *) CMD_SP(sc);

		goto dr_failed;
	} else {
		/* Cleanup LUN Reset Command */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			ret = SUCCESS; /* Completed Successfully */
		else
			ret = FAILED;
	}

dr_failed:
	/*
	 * NOTE(review): spin_is_locked() is not reliable on UP kernels
	 * (always returns 0 there), so this BUG_ON may mis-fire on
	 * uniprocessor builds — confirm against lockdep instead.
	 */
	SNIC_BUG_ON(!spin_is_locked(io_lock));
	if (rqi)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	/* Release outside the lock; may sleep in free paths. */
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

dr_fini_end:
	return ret;
} /* end of snic_dr_finish */
1983 | |||
1984 | static int | ||
1985 | snic_queue_dr_req(struct snic *snic, | ||
1986 | struct snic_req_info *rqi, | ||
1987 | struct scsi_cmnd *sc) | ||
1988 | { | ||
1989 | /* Add special tag for device reset */ | ||
1990 | rqi->tm_tag |= SNIC_TAG_DEV_RST; | ||
1991 | |||
1992 | return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET); | ||
1993 | } | ||
1994 | |||
/*
 * snic_send_dr_and_wait : Issue a LUN reset for @sc and wait (bounded)
 * for its completion.
 *
 * Returns 0 once the reset has been queued and the wait elapsed (the
 * actual reset outcome is read later via CMD_LR_STATUS in
 * snic_dr_finish), or FAILED when the request could not be queued.
 */
static int
snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int ret = FAILED, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));
		spin_unlock_irqrestore(io_lock, flags);

		ret = FAILED;
		goto send_dr_end;
	}

	/* Save Command state to restore in case Queuing failed. */
	sv_state = CMD_STATE(sc);

	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);

	/* The completion path will complete() this to end the wait below. */
	rqi->dr_done = &tm_done;
	SNIC_BUG_ON(!rqi->dr_done);

	spin_unlock_irqrestore(io_lock, flags);
	/*
	 * The Command state is changed to IOREQ_PENDING,
	 * in this case, if the command is completed, the icmnd_cmpl will
	 * mark the cmd as completed.
	 * This logic still makes LUN Reset is inevitable.
	 */

	ret = snic_queue_dr_req(snic, rqi, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore State */
		CMD_STATE(sc) = sv_state;
		/* Re-read: CMD_SP may have been cleared while unlocked. */
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->dr_done = NULL;
		/* rqi is freed in caller. */
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_dr_end;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	ret = 0;

	/* Bounded wait; timeout is detected later via SNIC_INVALID_CODE. */
	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);

send_dr_end:
	return ret;
}
2068 | |||
2069 | /* | ||
2070 | * auxillary funciton to check lun reset op is supported or not | ||
2071 | * Not supported if returns 0 | ||
2072 | */ | ||
2073 | static int | ||
2074 | snic_dev_reset_supported(struct scsi_device *sdev) | ||
2075 | { | ||
2076 | struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); | ||
2077 | |||
2078 | if (tgt->tdata.typ == SNIC_TGT_DAS) | ||
2079 | return 0; | ||
2080 | |||
2081 | return 1; | ||
2082 | } | ||
2083 | |||
2084 | static void | ||
2085 | snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag) | ||
2086 | { | ||
2087 | struct snic_req_info *rqi = NULL; | ||
2088 | spinlock_t *io_lock = NULL; | ||
2089 | unsigned long flags; | ||
2090 | u32 start_time = jiffies; | ||
2091 | |||
2092 | io_lock = snic_io_lock_hash(snic, sc); | ||
2093 | spin_lock_irqsave(io_lock, flags); | ||
2094 | rqi = (struct snic_req_info *) CMD_SP(sc); | ||
2095 | if (rqi) { | ||
2096 | start_time = rqi->start_time; | ||
2097 | CMD_SP(sc) = NULL; | ||
2098 | } | ||
2099 | |||
2100 | CMD_FLAGS(sc) |= flag; | ||
2101 | spin_unlock_irqrestore(io_lock, flags); | ||
2102 | |||
2103 | if (rqi) | ||
2104 | snic_release_req_buf(snic, rqi, sc); | ||
2105 | |||
2106 | SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc, | ||
2107 | jiffies_to_msecs(jiffies - start_time), (ulong) rqi, | ||
2108 | SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); | ||
2109 | } | ||
2110 | |||
2111 | /* | ||
2112 | * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN | ||
2113 | * fail to get aborted. It calls driver's eh_device_reset with a SCSI | ||
2114 | * command on the LUN. | ||
2115 | */ | ||
2116 | int | ||
2117 | snic_device_reset(struct scsi_cmnd *sc) | ||
2118 | { | ||
2119 | struct Scsi_Host *shost = sc->device->host; | ||
2120 | struct snic *snic = shost_priv(shost); | ||
2121 | struct snic_req_info *rqi = NULL; | ||
2122 | int tag = snic_cmd_tag(sc); | ||
2123 | int start_time = jiffies; | ||
2124 | int ret = FAILED; | ||
2125 | int dr_supp = 0; | ||
2126 | |||
2127 | SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n", | ||
2128 | sc, sc->cmnd[0], sc->request, | ||
2129 | snic_cmd_tag(sc)); | ||
2130 | dr_supp = snic_dev_reset_supported(sc->device); | ||
2131 | if (!dr_supp) { | ||
2132 | /* device reset op is not supported */ | ||
2133 | SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n"); | ||
2134 | snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP); | ||
2135 | |||
2136 | goto dev_rst_end; | ||
2137 | } | ||
2138 | |||
2139 | if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) { | ||
2140 | snic_unlink_and_release_req(snic, sc, 0); | ||
2141 | SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n"); | ||
2142 | |||
2143 | goto dev_rst_end; | ||
2144 | } | ||
2145 | |||
2146 | /* There is no tag when lun reset is issue through ioctl. */ | ||
2147 | if (unlikely(tag <= SNIC_NO_TAG)) { | ||
2148 | SNIC_HOST_INFO(snic->shost, | ||
2149 | "Devrst: LUN Reset Recvd thru IOCTL.\n"); | ||
2150 | |||
2151 | rqi = snic_req_init(snic, 0); | ||
2152 | if (!rqi) | ||
2153 | goto dev_rst_end; | ||
2154 | |||
2155 | memset(scsi_cmd_priv(sc), 0, | ||
2156 | sizeof(struct snic_internal_io_state)); | ||
2157 | CMD_SP(sc) = (char *)rqi; | ||
2158 | CMD_FLAGS(sc) = SNIC_NO_FLAGS; | ||
2159 | |||
2160 | /* Add special tag for dr coming from user spc */ | ||
2161 | rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST; | ||
2162 | rqi->sc = sc; | ||
2163 | } | ||
2164 | |||
2165 | ret = snic_send_dr_and_wait(snic, sc); | ||
2166 | if (ret) { | ||
2167 | SNIC_HOST_ERR(snic->shost, | ||
2168 | "Devrst: IO w/ Tag %x Failed w/ err = %d\n", | ||
2169 | tag, ret); | ||
2170 | |||
2171 | snic_unlink_and_release_req(snic, sc, 0); | ||
2172 | |||
2173 | goto dev_rst_end; | ||
2174 | } | ||
2175 | |||
2176 | ret = snic_dr_finish(snic, sc); | ||
2177 | |||
2178 | dev_rst_end: | ||
2179 | SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, | ||
2180 | jiffies_to_msecs(jiffies - start_time), | ||
2181 | 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); | ||
2182 | |||
2183 | SNIC_SCSI_DBG(snic->shost, | ||
2184 | "Devrst: Returning from Device Reset : %s\n", | ||
2185 | (ret == SUCCESS) ? "SUCCESS" : "FAILED"); | ||
2186 | |||
2187 | return ret; | ||
2188 | } /* end of snic_device_reset */ | ||
2189 | |||
/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 */
/*
 * snic_issue_hba_reset : Queues FW Reset Request.
 *
 * Allocates a request, queues an SNIC_REQ_HBA_RESET to the firmware and
 * waits (bounded by SNIC_HOST_RESET_TIMEOUT) for the reset-completion
 * path to wake snic->remove_wait. Returns 0 on success, negative errno
 * on failure.
 */
static int
snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret = -ENOMEM;

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		ret = -ENOMEM;

		goto hba_rst_end;
	}

	/* Reset issued via ioctl carries no tag: reset the cmd private. */
	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
		memset(scsi_cmd_priv(sc), 0,
		       sizeof(struct snic_internal_io_state));
		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
		rqi->sc = sc;
	}

	req = rqi_to_req(rqi);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	SNIC_BUG_ON(CMD_SP(sc) != NULL);
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
	/* Completion path will complete() this waiter on reset cmpl. */
	snic->remove_wait = &wait;
	spin_unlock_irqrestore(io_lock, flags);

	/* Initialize Request */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
			snic->config.hid, 0, (ulong) rqi);

	req->u.reset.flags = 0;

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_hr:Queuing HBA Reset Failed. w err %d\n",
			      ret);

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_resets);
	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");

	wait_for_completion_timeout(snic->remove_wait,
				    SNIC_HOST_RESET_TIMEOUT);

	/* Still in FWRESET after the wait => completion never arrived. */
	if (snic_get_state(snic) == SNIC_FWRESET) {
		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
		ret = -ETIMEDOUT;

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

	ret = 0;

	return ret;

hba_rst_err:
	/* Unwind: clear the waiter, detach and free the request if set. */
	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

hba_rst_end:
	SNIC_HOST_ERR(snic->shost,
		      "reset:HBA Reset Failed w/ err = %d.\n",
		      ret);

	return ret;
} /* end of snic_issue_hba_reset */
2296 | |||
2297 | int | ||
2298 | snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc) | ||
2299 | { | ||
2300 | struct snic *snic = shost_priv(shost); | ||
2301 | enum snic_state sv_state; | ||
2302 | unsigned long flags; | ||
2303 | int ret = FAILED; | ||
2304 | |||
2305 | /* Set snic state as SNIC_FWRESET*/ | ||
2306 | sv_state = snic_get_state(snic); | ||
2307 | |||
2308 | spin_lock_irqsave(&snic->snic_lock, flags); | ||
2309 | if (snic_get_state(snic) == SNIC_FWRESET) { | ||
2310 | spin_unlock_irqrestore(&snic->snic_lock, flags); | ||
2311 | SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n"); | ||
2312 | |||
2313 | msleep(SNIC_HOST_RESET_TIMEOUT); | ||
2314 | ret = SUCCESS; | ||
2315 | |||
2316 | goto reset_end; | ||
2317 | } | ||
2318 | |||
2319 | snic_set_state(snic, SNIC_FWRESET); | ||
2320 | spin_unlock_irqrestore(&snic->snic_lock, flags); | ||
2321 | |||
2322 | |||
2323 | /* Wait for all the IOs that are entered in Qcmd */ | ||
2324 | while (atomic_read(&snic->ios_inflight)) | ||
2325 | schedule_timeout(msecs_to_jiffies(1)); | ||
2326 | |||
2327 | ret = snic_issue_hba_reset(snic, sc); | ||
2328 | if (ret) { | ||
2329 | SNIC_HOST_ERR(shost, | ||
2330 | "reset:Host Reset Failed w/ err %d.\n", | ||
2331 | ret); | ||
2332 | spin_lock_irqsave(&snic->snic_lock, flags); | ||
2333 | snic_set_state(snic, sv_state); | ||
2334 | spin_unlock_irqrestore(&snic->snic_lock, flags); | ||
2335 | atomic64_inc(&snic->s_stats.reset.hba_reset_fail); | ||
2336 | ret = FAILED; | ||
2337 | |||
2338 | goto reset_end; | ||
2339 | } | ||
2340 | |||
2341 | ret = SUCCESS; | ||
2342 | |||
2343 | reset_end: | ||
2344 | return ret; | ||
2345 | } /* end of snic_reset */ | ||
2346 | |||
2347 | /* | ||
2348 | * SCSI Error handling calls driver's eh_host_reset if all prior | ||
2349 | * error handling levels return FAILED. | ||
2350 | * | ||
2351 | * Host Reset is the highest level of error recovery. If this fails, then | ||
2352 | * host is offlined by SCSI. | ||
2353 | */ | ||
2354 | int | ||
2355 | snic_host_reset(struct scsi_cmnd *sc) | ||
2356 | { | ||
2357 | struct Scsi_Host *shost = sc->device->host; | ||
2358 | u32 start_time = jiffies; | ||
2359 | int ret = FAILED; | ||
2360 | |||
2361 | SNIC_SCSI_DBG(shost, | ||
2362 | "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n", | ||
2363 | sc, sc->cmnd[0], sc->request, | ||
2364 | snic_cmd_tag(sc), CMD_FLAGS(sc)); | ||
2365 | |||
2366 | ret = snic_reset(shost, sc); | ||
2367 | |||
2368 | SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc, | ||
2369 | jiffies_to_msecs(jiffies - start_time), | ||
2370 | 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); | ||
2371 | |||
2372 | return ret; | ||
2373 | } /* end of snic_host_reset */ | ||
2374 | |||
2375 | /* | ||
2376 | * snic_cmpl_pending_tmreq : Caller should hold io_lock | ||
2377 | */ | ||
2378 | static void | ||
2379 | snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc) | ||
2380 | { | ||
2381 | struct snic_req_info *rqi = NULL; | ||
2382 | |||
2383 | SNIC_SCSI_DBG(snic->shost, | ||
2384 | "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", | ||
2385 | sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); | ||
2386 | |||
2387 | rqi = (struct snic_req_info *) CMD_SP(sc); | ||
2388 | if (!rqi) | ||
2389 | return; | ||
2390 | |||
2391 | if (rqi->dr_done) | ||
2392 | complete(rqi->dr_done); | ||
2393 | else if (rqi->abts_done) | ||
2394 | complete(rqi->abts_done); | ||
2395 | } | ||
2396 | |||
/*
 * snic_scsi_cleanup: Walks through tag map and releases the reqs
 *
 * For every outstanding command (except @ex_tag): wakes any pending TM
 * waiter, frees the request buffer, and completes the command with
 * DID_TRANSPORT_DISRUPTED.
 */
static void
snic_scsi_cleanup(struct snic *snic, int ex_tag)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int tag;
	u64 st_time = 0;

	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		/* Skip ex_tag */
		if (tag == ex_tag)
			continue;

		io_lock = snic_io_lock_tag(snic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			/* Tag not in use. */
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		if (unlikely(snic_tmreq_pending(sc))) {
			/*
			 * When FW Completes reset w/o sending completions
			 * for outstanding ios.
			 */
			snic_cmpl_pending_tmreq(snic, sc);
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			/* No request attached; still complete the command. */
			goto cleanup;
		}

		SNIC_SCSI_DBG(snic->shost,
			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
			      sc, rqi, tag, CMD_FLAGS(sc));

		/* Detach under the lock, release the buffer outside it. */
		CMD_SP(sc) = NULL;
		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
		spin_unlock_irqrestore(io_lock, flags);
		st_time = rqi->start_time;

		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
			       rqi, CMD_FLAGS(sc));

		snic_release_req_buf(snic, rqi, sc);

cleanup:
		/*
		 * NOTE(review): on the !rqi path st_time stays 0, so the
		 * duration below is bogus for that case — log-only impact.
		 */
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
			       sc, rqi, (jiffies - st_time));

		/* Update IO stats */
		snic_stats_update_io_cmpl(&snic->s_stats);

		if (sc->scsi_done) {
			SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
				 jiffies_to_msecs(jiffies - st_time), 0,
				 SNIC_TRC_CMD(sc),
				 SNIC_TRC_CMD_STATE_FLAGS(sc));

			sc->scsi_done(sc);
		}
	}
} /* end of snic_scsi_cleanup */
2478 | |||
2479 | void | ||
2480 | snic_shutdown_scsi_cleanup(struct snic *snic) | ||
2481 | { | ||
2482 | SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n"); | ||
2483 | |||
2484 | snic_scsi_cleanup(snic, SCSI_NO_TAG); | ||
2485 | } /* end of snic_shutdown_scsi_cleanup */ | ||
2486 | |||
/*
 * snic_internal_abort_io
 * called by : snic_tgt_scsi_abort_io
 *
 * Issues an internal abort/terminate for one command, saving and
 * restoring the command state around the queue attempt.
 *
 * Returns 0 when there was nothing to abort, SUCCESS when the terminate
 * was issued, or the (non-zero) snic_queue_abort_req() error.
 */
static int
snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 sv_state = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		/* No request attached; nothing to abort. */
		goto skip_internal_abts;

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		/* An abort is already in flight for this command. */
		goto skip_internal_abts;

	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
		(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		/* Device reset was requested but never sent to FW. */
		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: dev rst not pending sc 0x%p\n",
			      sc);

		goto skip_internal_abts;
	}


	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
		SNIC_SCSI_DBG(snic->shost,
			"internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
			sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));

		goto skip_internal_abts;
	}

	/* Save state so it can be restored if queueing the abort fails. */
	sv_state = CMD_STATE(sc);
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		/* stats */
		rqi->tm_tag = SNIC_TAG_DEV_RST;
		SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
	}

	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
		      snic_cmd_tag(sc));
	SNIC_BUG_ON(rqi->abts_done);
	spin_unlock_irqrestore(io_lock, flags);

	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
			      snic_cmd_tag(sc), ret);

		spin_lock_irqsave(io_lock, flags);

		/* Restore only if nothing else changed the state meanwhile. */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		goto skip_internal_abts;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
	else
		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;

	ret = SUCCESS;

skip_internal_abts:
	/*
	 * NOTE(review): spin_is_locked() always returns 0 on UP kernels,
	 * so this BUG_ON may mis-fire on uniprocessor builds — confirm
	 * via lockdep instead.
	 */
	SNIC_BUG_ON(!spin_is_locked(io_lock));
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_internal_abort_io */
2572 | |||
2573 | /* | ||
2574 | * snic_tgt_scsi_abort_io : called by snic_tgt_del | ||
2575 | */ | ||
2576 | int | ||
2577 | snic_tgt_scsi_abort_io(struct snic_tgt *tgt) | ||
2578 | { | ||
2579 | struct snic *snic = NULL; | ||
2580 | struct scsi_cmnd *sc = NULL; | ||
2581 | struct snic_tgt *sc_tgt = NULL; | ||
2582 | spinlock_t *io_lock = NULL; | ||
2583 | unsigned long flags; | ||
2584 | int ret = 0, tag, abt_cnt = 0, tmf = 0; | ||
2585 | |||
2586 | if (!tgt) | ||
2587 | return -1; | ||
2588 | |||
2589 | snic = shost_priv(snic_tgt_to_shost(tgt)); | ||
2590 | SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n"); | ||
2591 | |||
2592 | if (tgt->tdata.typ == SNIC_TGT_DAS) | ||
2593 | tmf = SNIC_ITMF_ABTS_TASK; | ||
2594 | else | ||
2595 | tmf = SNIC_ITMF_ABTS_TASK_TERM; | ||
2596 | |||
2597 | for (tag = 0; tag < snic->max_tag_id; tag++) { | ||
2598 | io_lock = snic_io_lock_tag(snic, tag); | ||
2599 | |||
2600 | spin_lock_irqsave(io_lock, flags); | ||
2601 | sc = scsi_host_find_tag(snic->shost, tag); | ||
2602 | if (!sc) { | ||
2603 | spin_unlock_irqrestore(io_lock, flags); | ||
2604 | |||
2605 | continue; | ||
2606 | } | ||
2607 | |||
2608 | sc_tgt = starget_to_tgt(scsi_target(sc->device)); | ||
2609 | if (sc_tgt != tgt) { | ||
2610 | spin_unlock_irqrestore(io_lock, flags); | ||
2611 | |||
2612 | continue; | ||
2613 | } | ||
2614 | spin_unlock_irqrestore(io_lock, flags); | ||
2615 | |||
2616 | ret = snic_internal_abort_io(snic, sc, tmf); | ||
2617 | if (ret < 0) { | ||
2618 | SNIC_HOST_ERR(snic->shost, | ||
2619 | "tgt_abt_io: Tag %x, Failed w err = %d\n", | ||
2620 | tag, ret); | ||
2621 | |||
2622 | continue; | ||
2623 | } | ||
2624 | |||
2625 | if (ret == SUCCESS) | ||
2626 | abt_cnt++; | ||
2627 | } | ||
2628 | |||
2629 | SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt); | ||
2630 | |||
2631 | return 0; | ||
2632 | } /* end of snic_tgt_scsi_abort_io */ | ||