Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')

 drivers/scsi/lpfc/lpfc_scsi.c | 514
 1 file changed, 463 insertions(+), 51 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1bcebbd3dfac..bd1867411821 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -32,6 +32,7 @@
 #include "lpfc_version.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
@@ -42,6 +43,111 @@
 #define LPFC_RESET_WAIT 2
 #define LPFC_ABORT_WAIT 2
 
+/**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when a command completes and it updates the
+ * statistical data for that command.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	unsigned long flags;
+	struct Scsi_Host *shost = cmd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	unsigned long latency;
+	int i;
+
+	if (cmd->result)
+		return;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (!vport->stat_data_enabled ||
+	    vport->stat_data_blocked ||
+	    !pnode->lat_data ||
+	    (phba->bucket_type == LPFC_NO_BUCKET)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return;
+	}
+	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
+
+	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+			phba->bucket_step;
+		if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1;
+	} else {
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+			if (latency <= (phba->bucket_base +
+				((1<<i)*phba->bucket_step)))
+				break;
+	}
+
+	pnode->lat_data[i].cmd_count++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
+ * event.
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+		struct lpfc_vport *vport,
+		struct lpfc_nodelist *ndlp,
+		uint32_t lun,
+		uint32_t old_val,
+		uint32_t new_val)
+{
+	struct lpfc_fast_path_event *fast_path_evt;
+	unsigned long flags;
+
+	fast_path_evt = lpfc_alloc_fast_evt(phba);
+	if (!fast_path_evt)
+		return;
+
+	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+		FC_REG_SCSI_EVENT;
+	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+		LPFC_EVENT_VARQUEDEPTH;
+
+	/* Report all luns with change in queue depth */
+	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+			&ndlp->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	}
+
+	fast_path_evt->un.queue_depth_evt.oldval = old_val;
+	fast_path_evt->un.queue_depth_evt.newval = new_val;
+	fast_path_evt->vport = vport;
+
+	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+
+	return;
+}
+
 /*
  * This function is called with no lock held when there is a resource
  * error in driver or in firmware.
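
The two bucket types above map a completion latency in milliseconds to a histogram slot in different ways: linear buckets are fixed-width steps, while the power-of-2 buckets double in width from slot to slot. A standalone sketch of the same index math, using illustrative stand-in values for bucket_base and bucket_step (in the driver these come from the statistical-data controls):

#include <stdio.h>

#define MAX_BUCKET_COUNT 10	/* stand-in for LPFC_MAX_BUCKET_COUNT */

/* Linear buckets: fixed-width slots of 'step' ms starting at 'base'. */
static int linear_bucket(unsigned long latency, unsigned long base,
			 unsigned long step)
{
	int i = (latency + step - 1 - base) / step;

	if (i >= MAX_BUCKET_COUNT)
		i = MAX_BUCKET_COUNT - 1;	/* clamp into the last slot */
	return i;
}

/* Power-of-2 buckets: slot i holds latencies up to base + (1 << i) * step. */
static int pow2_bucket(unsigned long latency, unsigned long base,
		       unsigned long step)
{
	int i;

	for (i = 0; i < MAX_BUCKET_COUNT - 1; i++)
		if (latency <= base + ((1UL << i) * step))
			break;
	return i;	/* very large latencies land in the last slot */
}

int main(void)
{
	unsigned long base = 0, step = 10, lat;	/* illustrative values only */

	for (lat = 5; lat <= 5000; lat *= 10)
		printf("%4lu ms -> linear %d, pow2 %d\n", lat,
		       linear_bucket(lat, base, step),
		       pow2_bucket(lat, base, step));
	return 0;
}
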
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
-	unsigned long new_queue_depth;
+	unsigned long new_queue_depth, old_queue_depth;
 	unsigned long num_rsrc_err, num_cmd_success;
 	int i;
+	struct lpfc_rport_data *rdata;
 
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 	num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 				else
 					new_queue_depth = sdev->queue_depth -
 								new_queue_depth;
+				old_queue_depth = sdev->queue_depth;
 				if (sdev->ordered_tags)
 					scsi_adjust_queue_depth(sdev,
 							MSG_ORDERED_TAG,
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 					scsi_adjust_queue_depth(sdev,
 							MSG_SIMPLE_TAG,
 							new_queue_depth);
+				rdata = sdev->hostdata;
+				if (rdata)
+					lpfc_send_sdev_queuedepth_change_event(
+						phba, vports[i],
+						rdata->pnode,
+						sdev->lun, old_queue_depth,
+						new_queue_depth);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
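
For context, the proportional cut that feeds new_queue_depth is computed just above this hunk, in code the diff does not show: the depth shrinks by the share of recent commands that failed with a resource error. A hedged user-space paraphrase of that arithmetic (not a verbatim excerpt from the driver):

#include <stdio.h>

/*
 * Paraphrase of the ramp-down arithmetic: reduce the queue depth by
 * the fraction of recent commands that hit a resource error, backing
 * off by at least one.
 */
static unsigned long ramp_down(unsigned long queue_depth,
			       unsigned long num_rsrc_err,
			       unsigned long num_cmd_success)
{
	unsigned long cut = queue_depth * num_rsrc_err /
			    (num_rsrc_err + num_cmd_success);

	if (!cut)
		return queue_depth - 1;
	return queue_depth - cut;
}

int main(void)
{
	/* e.g. depth 32 with 8 errors vs 24 successes -> new depth 24 */
	printf("new depth = %lu\n", ramp_down(32, 8, 24));
	return 0;
}
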
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
 	int i;
+	struct lpfc_rport_data *rdata;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 					scsi_adjust_queue_depth(sdev,
 							MSG_SIMPLE_TAG,
 							sdev->queue_depth+1);
+				rdata = sdev->hostdata;
+				if (rdata)
+					lpfc_send_sdev_queuedepth_change_event(
+						phba, vports[i],
+						rdata->pnode,
+						sdev->lun,
+						sdev->queue_depth - 1,
+						sdev->queue_depth);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	atomic_set(&phba->num_cmd_success, 0);
 }
 
+/**
+ * lpfc_scsi_dev_block: set all scsi hosts to block state.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to the
+ * blocked state by invoking fc_remote_port_delete(). It is called by
+ * EEH when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	struct scsi_device *sdev;
+	struct fc_rport *rport;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
+				rport = starget_to_rport(scsi_target(sdev));
+				fc_remote_port_delete(rport);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	struct lpfc_scsi_buf *psb;
 	struct ulp_bde64 *bpl;
 	IOCB_t *iocb;
-	dma_addr_t pdma_phys;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl;
 	uint16_t iotag;
 
 	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 
 	/* Initialize local short-hand pointers. */
 	bpl = psb->fcp_bpl;
-	pdma_phys = psb->dma_handle;
+	pdma_phys_fcp_cmd = psb->dma_handle;
+	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
 
 	/*
 	 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
 	 * list bdes. Initialize the first two and leave the rest for
 	 * queuecommand.
 	 */
-	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
-	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
-	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
-	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
-	bpl->tus.w = le32_to_cpu(bpl->tus.w);
-	bpl++;
+	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 
 	/* Setup the physical region for the FCP RSP */
-	pdma_phys += sizeof (struct fcp_cmnd);
-	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
-	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
-	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
-	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
-	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 
 	/*
 	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
 	 * initialize it with all known data now.
 	 */
-	pdma_phys += (sizeof (struct fcp_rsp));
 	iocb = &psb->cur_iocbq.iocb;
 	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
-	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
-	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
-	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
-	iocb->ulpBdeCount = 1;
+	if (phba->sli_rev == 3) {
+		/* fill in immediate fcp command BDE */
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+						unsli3.fcp_ext.icd);
+		iocb->un.fcpi64.bdl.addrHigh = 0;
+		iocb->ulpBdeCount = 0;
+		iocb->ulpLe = 0;
+		/* fill in response BDE */
+		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+						sizeof(struct fcp_rsp);
+		iocb->unsli3.fcp_ext.rbde.addrLow =
+						putPaddrLow(pdma_phys_fcp_rsp);
+		iocb->unsli3.fcp_ext.rbde.addrHigh =
+						putPaddrHigh(pdma_phys_fcp_rsp);
+	} else {
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+		iocb->ulpBdeCount = 1;
+		iocb->ulpLe = 1;
+	}
 	iocb->ulpClass = CLASS3;
 
 	return psb;
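
The three pdma_phys_* handles above carve a single contiguous DMA allocation into the FCP command, the FCP response, and the BPL that follows them; the SLI-3 branch then embeds the command in the IOCB itself and points the response BDE at the response region, while SLI-2 keeps the classic two-entry BPL. A minimal sketch of the offset arithmetic, with stand-in sizes rather than the real struct fcp_cmnd and struct fcp_rsp definitions:

#include <stdio.h>
#include <stdint.h>

/* Stand-in sizes; the driver uses sizeof(struct fcp_cmnd) and
 * sizeof(struct fcp_rsp) from the lpfc headers. */
#define FCP_CMND_SIZE	32
#define FCP_RSP_SIZE	64

int main(void)
{
	uint64_t dma_handle = 0x100000;	/* illustrative DMA base address */

	/* Same carving as lpfc_new_scsi_buf: cmd, then rsp, then BPL. */
	uint64_t phys_fcp_cmd = dma_handle;
	uint64_t phys_fcp_rsp = dma_handle + FCP_CMND_SIZE;
	uint64_t phys_bpl = dma_handle + FCP_CMND_SIZE + FCP_RSP_SIZE;

	printf("cmd @ 0x%llx, rsp @ 0x%llx, bpl @ 0x%llx\n",
	       (unsigned long long)phys_fcp_cmd,
	       (unsigned long long)phys_fcp_rsp,
	       (unsigned long long)phys_bpl);
	return 0;
}
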
@@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
 	dma_addr_t physaddr;
-	uint32_t i, num_bde = 0;
+	uint32_t num_bde = 0;
 	int nseg, datadir = scsi_cmnd->sc_data_direction;
 
 	/*
@@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		 * during probe that limits the number of sg elements in any
 		 * single scsi command. Just run through the seg_cnt and format
 		 * the bde's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
 		 */
-		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 			physaddr = sg_dma_address(sgel);
-			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
-			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
-			bpl->tus.f.bdeSize = sg_dma_len(sgel);
-			if (datadir == DMA_TO_DEVICE)
-				bpl->tus.f.bdeFlags = 0;
-			else
-				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-			bpl->tus.w = le32_to_cpu(bpl->tus.w);
-			bpl++;
-			num_bde++;
+			if (phba->sli_rev == 3 &&
+			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+				data_bde->addrLow = putPaddrLow(physaddr);
+				data_bde->addrHigh = putPaddrHigh(physaddr);
+				data_bde++;
+			} else {
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				bpl->tus.f.bdeSize = sg_dma_len(sgel);
+				bpl->tus.w = le32_to_cpu(bpl->tus.w);
+				bpl->addrLow =
+					le32_to_cpu(putPaddrLow(physaddr));
+				bpl->addrHigh =
+					le32_to_cpu(putPaddrHigh(physaddr));
+				bpl++;
+			}
 		}
 	}
 
 	/*
 	 * Finish initializing those IOCB fields that are dependent on the
-	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
-	 * reinitialized since all iocb memory resources are used many times
-	 * for transmit, receive, and continuation bpl's.
+	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized and for SLI-3 the extended bde count is
+	 * explicitly reinitialized since all iocb memory resources are reused.
 	 */
-	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
-	iocb_cmd->un.fcpi64.bdl.bdeSize +=
-		(num_bde * sizeof (struct ulp_bde64));
-	iocb_cmd->ulpBdeCount = 1;
-	iocb_cmd->ulpLe = 1;
+	if (phba->sli_rev == 3) {
+		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+			/*
+			 * The extended IOCB format can only fit 3 BDE or a BPL.
+			 * This I/O has more than 3 BDE so the 1st data bde will
+			 * be a BPL that is filled in here.
+			 */
+			physaddr = lpfc_cmd->dma_handle;
+			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+			data_bde->tus.f.bdeSize = (num_bde *
+					sizeof(struct ulp_bde64));
+			physaddr += (sizeof(struct fcp_cmnd) +
+					sizeof(struct fcp_rsp) +
+					(2 * sizeof(struct ulp_bde64)));
+			data_bde->addrHigh = putPaddrHigh(physaddr);
+			data_bde->addrLow = putPaddrLow(physaddr);
+			/* ebde count includes the response bde and data bpl */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+		} else {
+			/* ebde count includes the response bde and data bdes */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+		}
+	} else {
+		iocb_cmd->un.fcpi64.bdl.bdeSize =
+			((num_bde + 2) * sizeof(struct ulp_bde64));
+	}
 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
 	return 0;
 }
 
+/**
+ * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when there is a SCSI command reporting
+ * an error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	struct lpfc_fast_path_event *fast_path_evt = NULL;
+	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+	unsigned long flags;
+
+	/* If there is queuefull or busy condition send a scsi event */
+	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+	    (cmnd->result == SAM_STAT_BUSY)) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.scsi_evt.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.scsi_evt.subcategory =
+			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+			LPFC_EVENT_CHECK_COND;
+		fast_path_evt->un.check_cond_evt.scsi_event.lun =
+			cmnd->device->lun;
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.check_cond_evt.sense_key =
+			cmnd->sense_buffer[2] & 0xf;
+		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+		     fcpi_parm &&
+		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+			((scsi_status == SAM_STAT_GOOD) &&
+			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
+		/*
+		 * If status is good or the resid does not match the valid
+		 * fcpi_parm, then there is a read_check error
+		 */
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.read_check_error.header.event_type =
+			FC_REG_FABRIC_EVENT;
+		fast_path_evt->un.read_check_error.header.subcategory =
+			LPFC_EVENT_FCPRDCHKERR;
+		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+		fast_path_evt->un.read_check_error.fcpiparam =
+			fcpi_parm;
+	} else
+		return;
+
+	fast_path_evt->vport = vport;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+	return;
+}
 static void
 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
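
The check-condition branch above extracts the sense key, ASC and ASCQ directly from fixed-format sense data: byte 2 low nibble, byte 12 and byte 13. A small self-contained decoder over the same offsets, with an illustrative sense buffer:

#include <stdio.h>
#include <stdint.h>

/* Decode fixed-format SCSI sense data at the offsets used above. */
static void decode_sense(const uint8_t *sense)
{
	uint8_t sense_key = sense[2] & 0xf;
	uint8_t asc = sense[12];
	uint8_t ascq = sense[13];

	printf("sense key 0x%x, asc 0x%02x, ascq 0x%02x\n",
	       sense_key, asc, ascq);
}

int main(void)
{
	/* illustrative buffer: MEDIUM ERROR, unrecovered read error */
	uint8_t sense[18] = { 0x70, 0, 0x03, [12] = 0x11, [13] = 0x00 };

	decode_sense(sense);
	return 0;
}
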
@@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	uint32_t rsplen = 0;
 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
 
+
 	/*
 	 * If this is a task management command, there is no
 	 * scsi packet associated with this lpfc_cmd. The driver
@@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
  out:
 	cmnd->result = ScsiResult(host_status, scsi_status);
+	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
 static void
@@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct scsi_device *sdev, *tmp_sdev;
 	int depth = 0;
 	unsigned long flags;
+	struct lpfc_fast_path_event *fast_path_evt;
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+	atomic_dec(&pnode->cmd_pending);
 
 	if (lpfc_cmd->status) {
 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			break;
 		case IOSTAT_NPORT_BSY:
 		case IOSTAT_FABRIC_BSY:
-			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+			fast_path_evt = lpfc_alloc_fast_evt(phba);
+			if (!fast_path_evt)
+				break;
+			fast_path_evt->un.fabric_evt.event_type =
+				FC_REG_FABRIC_EVENT;
+			fast_path_evt->un.fabric_evt.subcategory =
+				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+					&pnode->nlp_portname,
+					sizeof(struct lpfc_name));
+				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+					&pnode->nlp_nodename,
+					sizeof(struct lpfc_name));
+			}
+			fast_path_evt->vport = vport;
+			fast_path_evt->work_evt.evt =
+				LPFC_EVT_FASTPATH_MGMT_EVT;
+			spin_lock_irqsave(&phba->hbalock, flags);
+			list_add_tail(&fast_path_evt->work_evt.evt_listp,
+				&phba->work_list);
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			lpfc_worker_wake_up(phba);
 			break;
 		case IOSTAT_LOCAL_REJECT:
-			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
-			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
 				break;
 			} /* else: fall through */
@@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
-			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+				SAM_STAT_BUSY);
 	} else {
 		cmd->result = ScsiResult(DID_OK, 0);
 	}
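
Both this branch and the helpers earlier in the patch repeat one pattern: allocate a fast-path event, fill it in, add it to phba->work_list under hbalock, and wake the worker thread. A user-space sketch of that producer/worker handoff using pthreads (the struct and names here are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented event node; stands in for struct lpfc_fast_path_event. */
struct evt {
	struct evt *next;
	int code;
};

static struct evt *work_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

static void post_event(int code)
{
	struct evt *e = malloc(sizeof(*e));

	if (!e)		/* like the driver: drop the event on alloc failure */
		return;
	e->code = code;
	pthread_mutex_lock(&lock);
	e->next = work_list;	/* the driver appends at the tail instead */
	work_list = e;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wake);	/* the lpfc_worker_wake_up() analogue */
}

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!work_list)
		pthread_cond_wait(&wake, &lock);
	printf("worker got event %d\n", work_list->code);
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_event(42);
	pthread_join(t, NULL);
	return 0;
}
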
@@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				 scsi_get_resid(cmd));
 	}
 
+	lpfc_update_stats(phba, lpfc_cmd);
 	result = cmd->result;
 	sdev = cmd->device;
+	if (vport->cfg_max_scsicmpl_time &&
+	    time_after(jiffies, lpfc_cmd->start_time +
+		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
+		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
+		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
+			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
+
+		pnode->last_change_time = jiffies;
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+		   time_after(jiffies, pnode->last_change_time +
+			msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		pnode->cmd_qdepth += pnode->cmd_qdepth *
+			LPFC_TGTQ_RAMPUP_PCENT / 100;
+		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+		pnode->last_change_time = jiffies;
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	}
+
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	cmd->scsi_done(cmd);
 
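
The throttle above moves the per-target depth in both directions: it snaps pnode->cmd_qdepth down to the number of commands actually in flight when a READ_10/WRITE_10 overruns cfg_max_scsicmpl_time, and grows it by LPFC_TGTQ_RAMPUP_PCENT percent once LPFC_TGTQ_INTERVAL of quiet time has passed. A sketch of the ramp-up step with illustrative stand-ins for the LPFC_* constants:

#include <stdio.h>

/* Illustrative stand-ins for the LPFC_* constants in the lpfc headers. */
#define MAX_TGT_QDEPTH		0xFFFF
#define TGTQ_RAMPUP_PCENT	5

static unsigned int ramp_up_qdepth(unsigned int qdepth)
{
	qdepth += qdepth * TGTQ_RAMPUP_PCENT / 100;	/* grow by a percentage */
	if (qdepth > MAX_TGT_QDEPTH)	/* but never past the cap */
		qdepth = MAX_TGT_QDEPTH;
	return qdepth;
}

int main(void)
{
	unsigned int q = 64;
	int i;

	/* three undisturbed intervals: 64 -> 67 -> 70 -> 73 */
	for (i = 0; i < 3; i++) {
		q = ramp_up_qdepth(q);
		printf("interval %d: qdepth %u\n", i + 1, q);
	}
	return 0;
}
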
@@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				pnode->last_ramp_up_time = jiffies;
 			}
 		}
+		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
+			0xFFFFFFFF,
+			sdev->queue_depth - 1, sdev->queue_depth);
 	}
 
 	/*
@@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 					 "0711 detected queue full - lun queue "
 					 "depth adjusted to %d.\n", depth);
+			lpfc_send_sdev_queuedepth_change_event(phba, vport,
+				pnode, 0xFFFFFFFF,
+				depth+1, depth);
 		}
 	}
 
@@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
 
+/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+	int i, j;
+	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+			i += sizeof(uint32_t), j++) {
+		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+	}
+}
+
 static void
 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
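
lpfc_fcpcmd_to_iocb above treats the FCP command as an array of 32-bit words and swaps each into big-endian wire order; the pointer cast works there because the command block is 4-byte aligned inside the buffer. The same idea in portable user-space C, using htonl() as a stand-in for the kernel's cpu_to_be32() and memcpy to stay alignment-safe:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(): user-space stand-in for cpu_to_be32() */

/* Copy a structure word by word, converting to big-endian on the way. */
static void copy_to_wire(uint8_t *data, const void *src, size_t len)
{
	uint32_t word;
	size_t i;

	for (i = 0; i + sizeof(word) <= len; i += sizeof(word)) {
		memcpy(&word, (const uint8_t *)src + i, sizeof(word));
		word = htonl(word);
		memcpy(data + i, &word, sizeof(word));
	}
}

int main(void)
{
	uint32_t cmd[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint8_t wire[sizeof(cmd)];

	copy_to_wire(wire, cmd, sizeof(cmd));
	printf("first wire byte: 0x%02x\n", wire[0]);	/* 0x11 on any host */
	return 0;
}
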
@@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		fcp_cmnd->fcpCntl3 = 0;
 		phba->fc4ControlRequests++;
 	}
-
+	if (phba->sli_rev == 3)
+		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
 	/*
 	 * Finish initializing those IOCB fields that are independent
 	 * of the scsi_cmnd request_buffer
@@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	piocb = &piocbq->iocb;
 
 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
+	/* Clear out any old data in the FCP command area */
+	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-
+	if (vport->phba->sli_rev == 3)
+		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
-
 	piocb->ulpContext = ndlp->nlp_rpi;
 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
 		piocb->ulpFCP2Rcvy = 1;
@@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	 * transport is still transitioning.
 	 */
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
 		goto out_fail_command;
 	}
+	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+		goto out_host_busy;
+
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
 		lpfc_adjust_queue_depth(phba);
@@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy;
 	}
 
+	lpfc_cmd->start_time = jiffies;
 	/*
 	 * Store the midlayer's command structure for the completion phase
 	 * and complete the command initialization.
@@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	lpfc_cmd->pCmd = cmnd;
 	lpfc_cmd->rdata = rdata;
 	lpfc_cmd->timeout = 0;
+	lpfc_cmd->start_time = jiffies;
 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
 	cmnd->scsi_done = done;
 
@@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 
 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
+	atomic_inc(&ndlp->cmd_pending);
 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
@@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 
  out_host_busy_free_buf:
+	atomic_dec(&ndlp->cmd_pending);
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
@@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	int ret = SUCCESS;
 	int status;
 	int cnt;
+	struct lpfc_scsi_event_header scsi_event;
 
 	lpfc_block_error_handler(cmnd);
 	/*
@@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			break;
 		pnode = rdata->pnode;
 	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(scsi_event),
+		(char *)&scsi_event,
+		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
 	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0721 LUN Reset rport "
@@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	int match;
-	int ret = SUCCESS, status, i;
+	int ret = SUCCESS, status = SUCCESS, i;
 	int cnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
 	unsigned long later;
+	struct lpfc_scsi_event_header scsi_event;
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(scsi_event),
+		(char *)&scsi_event,
+		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 
 	lpfc_block_error_handler(cmnd);
 	/*