author		James Smart <James.Smart@Emulex.Com>	2007-06-17 20:56:38 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-06-17 23:27:39 -0400
commit		92d7f7b0cde3ad2260e7462b40867b57efd49851 (patch)
tree		fadb1d8f1a817c2f85937b5e9c3b830bdecb5555 /drivers/scsi/lpfc/lpfc_scsi.c
parent		ed957684294618602b48f1950b0c9bbcb036583f (diff)
[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3
NPIV support is added to the driver. It utilizes the interfaces of the fc transport for the creation and deletion of vports. Within the driver, a new Scsi_Host is created for each NPIV instance, and is paired with a new instance of a FC port. This allows N FC Port elements to share a single Adapter.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
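[Editor's note] For readers unfamiliar with the transport plumbing the message refers to, the sketch below shows, in rough form, how an NPIV-capable driver exposes vport creation and deletion through the FC transport class. The fc_function_template fields are the transport hooks of this era; the example_* names and their bodies are illustrative assumptions only, not lpfc's actual implementation (which this patch series adds elsewhere; note the new lpfc_vport.h include in the diff below).

/*
 * Illustrative sketch only -- not lpfc code.  Shows the shape of the FC
 * transport vport hooks referred to in the commit message above; the
 * example_* functions and their bodies are assumptions for illustration.
 */
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static int example_vport_create(struct fc_vport *fc_vport, bool disable)
{
	/* Allocate a new Scsi_Host plus a driver-side port object for this
	 * NPIV instance, then log the new WWPN into the fabric. */
	return 0;
}

static int example_vport_delete(struct fc_vport *fc_vport)
{
	/* Log the virtual port out and tear down only its Scsi_Host; the
	 * physical adapter and its other vports are untouched. */
	return 0;
}

static struct fc_function_template example_fc_transport_functions = {
	.vport_create	= example_vport_create,
	.vport_delete	= example_vport_delete,
	/* ... the usual rport and host attribute hooks elided ... */
};

Each vport created this way ends up with its own Scsi_Host and vpi, which is what the "%d (%d):" log prefixes added throughout the diff below are keyed on.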
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	355
1 file changed, 272 insertions, 83 deletions
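[Editor's note] Much of the added code is per-LUN queue-depth ramp logic (lpfc_adjust_queue_depth and the ramp up/down handlers). As a stand-alone illustration of the integer arithmetic used by lpfc_ramp_down_queue_handler in the first hunk, the hypothetical userspace snippet below reproduces just the math; the kernel code applies the result to each scsi_device through scsi_adjust_queue_depth().

#include <stdio.h>

/* Stand-alone illustration of the ramp-down arithmetic in the hunk below. */
static unsigned long ramp_down_depth(unsigned long queue_depth,
				     unsigned long num_rsrc_err,
				     unsigned long num_cmd_success)
{
	unsigned long delta = queue_depth * num_rsrc_err /
			      (num_rsrc_err + num_cmd_success);

	/* When the proportional step truncates to zero, back off by one. */
	return delta ? queue_depth - delta : queue_depth - 1;
}

int main(void)
{
	/* 5 resource errors against 45 successes trims a depth of 30 to 27;
	 * 1 error against 100 successes only drops it to 29. */
	printf("%lu %lu\n", ramp_down_depth(30, 5, 45),
	       ramp_down_depth(30, 1, 100));
	return 0;
}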
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 90c88733a4f5..af8f8968bfba 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,11 +37,160 @@
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
+#include "lpfc_vport.h"
 
 #define LPFC_RESET_WAIT 2
 #define LPFC_ABORT_WAIT 2
 
 /*
+ * This function is called with no lock held when there is a resource
+ * error in driver or in firmware.
+ */
+void
+lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	atomic_inc(&phba->num_rsrc_err);
+	phba->last_rsrc_error_time = jiffies;
+
+	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_down_time = jiffies;
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	if ((phba->pport->work_port_events &
+		WORKER_RAMP_DOWN_QUEUE) == 0) {
+		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+	}
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	return;
+}
+
+/*
+ * This function is called with no lock held when there is a successful
+ * SCSI command completion.
+ */
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_hba *phba,
+			struct scsi_device *sdev)
+{
+	unsigned long flags;
+	atomic_inc(&phba->num_cmd_success);
+
+	if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
+		return;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
+	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_up_time = jiffies;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	if ((phba->pport->work_port_events &
+		WORKER_RAMP_UP_QUEUE) == 0) {
+		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+	}
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host *host;
+	struct scsi_device *sdev;
+	unsigned long new_queue_depth;
+	unsigned long num_rsrc_err, num_cmd_success;
+
+	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+	num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		host = lpfc_shost_from_vport(vport);
+		if (!scsi_host_get(host))
+			continue;
+
+		spin_unlock_irq(&phba->hbalock);
+
+		shost_for_each_device(sdev, host) {
+			new_queue_depth = sdev->queue_depth * num_rsrc_err /
+			(num_rsrc_err + num_cmd_success);
+			if (!new_queue_depth)
+				new_queue_depth = sdev->queue_depth - 1;
+			else
+				new_queue_depth =
+					sdev->queue_depth - new_queue_depth;
+
+			if (sdev->ordered_tags)
+				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+							new_queue_depth);
+			else
+				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+							new_queue_depth);
+		}
+		spin_lock_irq(&phba->hbalock);
+		scsi_host_put(host);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host *host;
+	struct scsi_device *sdev;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		host = lpfc_shost_from_vport(vport);
+		if (!scsi_host_get(host))
+			continue;
+
+		spin_unlock_irq(&phba->hbalock);
+		shost_for_each_device(sdev, host) {
+			if (sdev->ordered_tags)
+				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+							sdev->queue_depth+1);
+			else
+				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+							sdev->queue_depth+1);
+		}
+		spin_lock_irq(&phba->hbalock);
+		scsi_host_put(host);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+/*
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
  * contains information to build the IOCB. The DMAable region contains
@@ -154,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 }
 
 static void
-lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
 	unsigned long iflag = 0;
 
@@ -165,13 +314,16 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 }
 
 static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+			? lpfc_cmd->cur_iocbq.vport->vpi
+			: 0);
 	dma_addr_t physaddr;
 	uint32_t i, num_bde = 0;
 	int datadir = scsi_cmnd->sc_data_direction;
@@ -235,9 +387,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 		dma_error = dma_mapping_error(physaddr);
 		if (dma_error) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"%d:0718 Unable to dma_map_single "
+				"%d (%d):0718 Unable to dma_map_single "
 				"request_buffer: x%x\n",
-				phba->brd_no, dma_error);
+				phba->brd_no, vpi, dma_error);
 			return 1;
 		}
 
@@ -299,6 +451,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	uint32_t vpi = vport->vpi;
 	uint32_t resp_info = fcprsp->rspStatus2;
 	uint32_t scsi_status = fcprsp->rspStatus3;
 	uint32_t *lp;
@@ -331,9 +484,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		logit = LOG_FCP;
 
 	lpfc_printf_log(phba, KERN_WARNING, logit,
-			"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+			"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
-			phba->brd_no, cmnd->cmnd[0], scsi_status,
+			phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
 			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
 			be32_to_cpu(fcprsp->rspResId),
 			be32_to_cpu(fcprsp->rspSnsLen),
@@ -354,10 +507,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		cmnd->resid = be32_to_cpu(fcprsp->rspResId);
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"%d:0716 FCP Read Underrun, expected %d, "
-				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
-				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
-				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+				"%d (%d):0716 FCP Read Underrun, expected %d, "
+				"residual %d Data: x%x x%x x%x\n",
+				phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
+				cmnd->resid, fcpi_parm, cmnd->cmnd[0],
+				cmnd->underflow);
 
 		/*
 		 * If there is an under run check if under run reported by
@@ -368,12 +522,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    fcpi_parm &&
 		    (cmnd->resid != fcpi_parm)) {
 			lpfc_printf_log(phba, KERN_WARNING,
 					LOG_FCP | LOG_FCP_ERROR,
-					"%d:0735 FCP Read Check Error and Underrun "
-					"Data: x%x x%x x%x x%x\n", phba->brd_no,
-					be32_to_cpu(fcpcmd->fcpDl),
-					cmnd->resid,
-					fcpi_parm, cmnd->cmnd[0]);
+					"%d (%d):0735 FCP Read Check Error "
+					"and Underrun Data: x%x x%x x%x x%x\n",
+					phba->brd_no, vpi,
+					be32_to_cpu(fcpcmd->fcpDl),
+					cmnd->resid, fcpi_parm, cmnd->cmnd[0]);
 			cmnd->resid = cmnd->request_bufflen;
 			host_status = DID_ERROR;
 		}
@@ -387,19 +541,20 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    (scsi_status == SAM_STAT_GOOD) &&
 		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-					"%d:0717 FCP command x%x residual "
+					"%d (%d):0717 FCP command x%x residual "
 					"underrun converted to error "
-					"Data: x%x x%x x%x\n", phba->brd_no,
-					cmnd->cmnd[0], cmnd->request_bufflen,
-					cmnd->resid, cmnd->underflow);
+					"Data: x%x x%x x%x\n",
+					phba->brd_no, vpi, cmnd->cmnd[0],
+					cmnd->request_bufflen, cmnd->resid,
+					cmnd->underflow);
 
 			host_status = DID_ERROR;
 		}
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0720 FCP command x%x residual "
+				"%d (%d):0720 FCP command x%x residual "
 				"overrun error. Data: x%x x%x \n",
-				phba->brd_no, cmnd->cmnd[0],
+				phba->brd_no, vpi, cmnd->cmnd[0],
 				cmnd->request_bufflen, cmnd->resid);
 		host_status = DID_ERROR;
 
@@ -410,11 +565,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
 		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				"%d:0734 FCP Read Check Error Data: "
-				"x%x x%x x%x x%x\n", phba->brd_no,
-				be32_to_cpu(fcpcmd->fcpDl),
-				be32_to_cpu(fcprsp->rspResId),
-				fcpi_parm, cmnd->cmnd[0]);
+				"%d (%d):0734 FCP Read Check Error Data: "
+				"x%x x%x x%x x%x\n",
+				phba->brd_no, vpi,
+				be32_to_cpu(fcpcmd->fcpDl),
+				be32_to_cpu(fcprsp->rspResId),
+				fcpi_parm, cmnd->cmnd[0]);
 		host_status = DID_ERROR;
 		cmnd->resid = cmnd->request_bufflen;
 	}
@@ -433,6 +589,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+			? lpfc_cmd->cur_iocbq.vport->vpi
+			: 0);
 	int result;
 	struct scsi_device *sdev, *tmp_sdev;
 	int depth = 0;
@@ -448,11 +607,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		lpfc_cmd->status = IOSTAT_DEFAULT;
 
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
-				"x%x result: x%x Data: x%x x%x\n",
-				phba->brd_no, cmd->cmnd[0], cmd->device->id,
-				cmd->device->lun, lpfc_cmd->status,
-				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+				"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
+				"status: x%x result: x%x Data: x%x x%x\n",
+				phba->brd_no, vpi, cmd->cmnd[0],
+				cmd->device ? cmd->device->id : 0xffff,
+				cmd->device ? cmd->device->lun : 0xffff,
+				lpfc_cmd->status, lpfc_cmd->result,
+				pIocbOut->iocb.ulpContext,
 				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
 
 		switch (lpfc_cmd->status) {
@@ -464,6 +625,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		case IOSTAT_FABRIC_BSY:
 			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
 			break;
+		case IOSTAT_LOCAL_REJECT:
+			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
+			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+				cmd->result = ScsiResult(DID_REQUEUE, 0);
+				break;
+			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
@@ -480,9 +648,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			uint32_t *lp = (uint32_t *)cmd->sense_buffer;
 
 			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-					"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
-					"SNS x%x x%x Data: x%x x%x\n",
-					phba->brd_no, cmd->device->id,
+					"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
+					"x%x SNS x%x x%x Data: x%x x%x\n",
+					phba->brd_no, vpi, cmd->device->id,
 					cmd->device->lun, cmd, cmd->result,
 					*lp, *(lp + 3), cmd->retries, cmd->resid);
 		}
@@ -497,6 +665,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		return;
 	}
 
+
+	if (!result)
+		lpfc_rampup_queue_depth(phba, sdev);
+
 	if (!result && pnode != NULL &&
 	    ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -545,8 +717,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 		if (depth) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0711 detected queue full - lun queue depth "
-				" adjusted to %d.\n", phba->brd_no, depth);
+				"%d (%d):0711 detected queue full - "
+				"lun queue depth adjusted to %d.\n",
+				phba->brd_no, vpi, depth);
 		}
 	}
 
@@ -733,10 +906,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 
 	/* Issue Target Reset to TGT <num> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-			"%d:0702 Issue Target Reset to TGT %d "
+			"%d (%d):0702 Issue Target Reset to TGT %d "
 			"Data: x%x x%x\n",
-			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
-			rdata->pnode->nlp_flag);
+			phba->brd_no, vport->vpi, tgt_id,
+			rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
 
 	ret = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
@@ -842,9 +1015,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	}
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
+		lpfc_adjust_queue_depth(phba);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"%d:0707 driver's buffer pool is empty, "
-				"IO busied\n", phba->brd_no);
+				"%d (%d):0707 driver's buffer pool is empty, "
+				"IO busied\n",
+				phba->brd_no, vport->vpi);
 		goto out_host_busy;
 	}
 
@@ -865,7 +1041,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
-				&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
 		goto out_host_busy_free_buf;
 
@@ -986,18 +1162,19 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	if (lpfc_cmd->pCmd == cmnd) {
 		ret = FAILED;
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"%d:0748 abort handler timed out waiting for "
-				"abort to complete: ret %#x, ID %d, LUN %d, "
-				"snum %#lx\n",
-				phba->brd_no, ret, cmnd->device->id,
-				cmnd->device->lun, cmnd->serial_number);
+				"%d (%d):0748 abort handler timed out waiting "
+				"for abort to complete: ret %#x, ID %d, "
+				"LUN %d, snum %#lx\n",
+				phba->brd_no, vport->vpi, ret,
+				cmnd->device->id, cmnd->device->lun,
+				cmnd->serial_number);
 	}
 
  out:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-			"%d:0749 SCSI Layer I/O Abort Request "
+			"%d (%d):0749 SCSI Layer I/O Abort Request "
 			"Status x%x ID %d LUN %d snum %#lx\n",
-			phba->brd_no, ret, cmnd->device->id,
+			phba->brd_no, vport->vpi, ret, cmnd->device->id,
 			cmnd->device->lun, cmnd->serial_number);
 
 	return ret;
@@ -1024,7 +1201,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	while ( 1 ) {
+	while (1) {
 		if (!pnode)
 			goto out;
 
@@ -1035,9 +1212,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 		if (!rdata ||
 		    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0721 LUN Reset rport failure:"
-					" cnt x%x rdata x%p\n",
-					phba->brd_no, loopcnt, rdata);
+					"%d (%d):0721 LUN Reset rport "
+					"failure: cnt x%x rdata x%p\n",
+					phba->brd_no, vport->vpi,
+					loopcnt, rdata);
 			goto out;
 		}
 		pnode = rdata->pnode;
@@ -1068,8 +1246,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 		goto out_free_scsi_buf;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-			"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
-			"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
+			"%d (%d):0703 Issue target reset to TGT %d LUN %d "
+			"rpi x%x nlp_flag x%x\n",
+			phba->brd_no, vport->vpi, cmnd->device->id,
 			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
 
 	iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1103,7 +1282,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			    cmnd->device->id, cmnd->device->lun,
 			    0, LPFC_CTX_LUN);
 	loopcnt = 0;
-	while (cnt) {
+	while(cnt) {
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 
 		if (++loopcnt
@@ -1118,8 +1297,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 
 	if (cnt) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"%d:0719 device reset I/O flush failure: cnt x%x\n",
-			phba->brd_no, cnt);
+			"%d (%d):0719 device reset I/O flush failure: "
+			"cnt x%x\n",
+			phba->brd_no, vport->vpi, cnt);
 		ret = FAILED;
 	}
 
@@ -1128,10 +1308,10 @@ out_free_scsi_buf:
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 	}
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"%d:0713 SCSI layer issued device reset (%d, %d) "
+			"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
 			"return x%x status x%x result x%x\n",
-			phba->brd_no, cmnd->device->id, cmnd->device->lun,
-			ret, cmd_status, cmd_result);
+			phba->brd_no, vport->vpi, cmnd->device->id,
+			cmnd->device->lun, ret, cmd_status, cmd_result);
 
 out:
 	return ret;
@@ -1184,8 +1364,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 					  ndlp->rport->dd_data);
 		if (ret != SUCCESS) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0700 Bus Reset on target %d failed\n",
-					phba->brd_no, i);
+					"%d (%d):0700 Bus Reset on target %d "
+					"failed\n",
+					phba->brd_no, vport->vpi, i);
 			err_count++;
 			break;
 		}
@@ -1210,7 +1391,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
 			    0, 0, 0, LPFC_CTX_HOST);
 	loopcnt = 0;
-	while (cnt) {
+	while(cnt) {
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 
 		if (++loopcnt
@@ -1224,16 +1405,15 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 
 	if (cnt) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
-			phba->brd_no, cnt, i);
+			"%d (%d):0715 Bus Reset I/O flush failure: "
+			"cnt x%x left x%x\n",
+			phba->brd_no, vport->vpi, cnt, i);
 		ret = FAILED;
 	}
 
-	lpfc_printf_log(phba,
-			KERN_ERR,
-			LOG_FCP,
-			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
-			phba->brd_no, ret);
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
+			phba->brd_no, vport->vpi, ret);
 out:
 	return ret;
 }
@@ -1263,17 +1443,24 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 	 */
 	total = phba->total_scsi_bufs;
 	num_to_alloc = phba->cfg_lun_queue_depth + 2;
-	if (total >= phba->cfg_hba_queue_depth) {
+
+	/* Allow some exchanges to be available always to complete discovery */
+	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0704 At limitation of %d preallocated "
-				"command buffers\n", phba->brd_no, total);
+				"%d (%d):0704 At limitation of %d "
+				"preallocated command buffers\n",
+				phba->brd_no, vport->vpi, total);
 		return 0;
-	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+
+	/* Allow some exchanges to be available always to complete discovery */
+	} else if (total + num_to_alloc >
+		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0705 Allocation request of %d command "
-				"buffers will exceed max of %d. Reducing "
-				"allocation request to %d.\n", phba->brd_no,
-				num_to_alloc, phba->cfg_hba_queue_depth,
+				"%d (%d):0705 Allocation request of %d "
+				"command buffers will exceed max of %d. "
+				"Reducing allocation request to %d.\n",
+				phba->brd_no, vport->vpi, num_to_alloc,
+				phba->cfg_hba_queue_depth,
 				(phba->cfg_hba_queue_depth - total));
 		num_to_alloc = phba->cfg_hba_queue_depth - total;
 	}
@@ -1282,8 +1469,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 		scsi_buf = lpfc_new_scsi_buf(vport);
 		if (!scsi_buf) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0706 Failed to allocate command "
-					"buffer\n", phba->brd_no);
+					"%d (%d):0706 Failed to allocate "
+					"command buffer\n",
+					phba->brd_no, vport->vpi);
 			break;
 		}
 
@@ -1331,6 +1519,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
 	return;
 }
 
+
 struct scsi_host_template lpfc_template = {
 	.module			= THIS_MODULE,
 	.name			= LPFC_DRIVER_NAME,