author     James Smart <james.smart@emulex.com>          2011-10-10 21:33:49 -0400
committer  James Bottomley <JBottomley@Parallels.com>    2011-10-16 12:32:17 -0400
commit     5350d872c19a59ef8eadab1e70db83064c134cfa (patch)
tree       4a34c698b36f0874ca808a76583a1d69b122437d /drivers/scsi/lpfc
parent     cd1c8301db15ee52bfc5a0e5bc16b52bab8475aa (diff)
[SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
Fix queue allocation failure recovery:

- Move the allocation of the queues closer to the creation of the queues.
- If there is a problem with creation, or if the HBA is reset, the queues
  will be completely freed and re-allocated.
- Only allocate fcp_eq_hdl if cfg_fcp_eq_count is non-zero.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
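The third point is the key behavioral change: the per-EQ handle array is now
allocated only when at least one fast-path EQ is configured, because a count
of zero (a single interrupt vector) is a legitimate configuration rather than
an error. A minimal userspace sketch of that pattern follows; the names are
illustrative, not the actual lpfc structures:

    #include <stdlib.h>

    struct eq_handle { unsigned int eq_index; };

    struct hba_ctx {
        unsigned int eq_count;        /* may legitimately be zero */
        struct eq_handle *eq_handles; /* allocated only when eq_count != 0 */
    };

    /* Allocate the per-EQ handle array; zero EQs is not an error. */
    static int alloc_eq_handles(struct hba_ctx *hba)
    {
        if (!hba->eq_count) {
            hba->eq_handles = NULL;
            return 0;
        }
        hba->eq_handles = calloc(hba->eq_count, sizeof(*hba->eq_handles));
        return hba->eq_handles ? 0 : -1; /* -ENOMEM in the driver */
    }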
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 153
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  |  30
3 files changed, 118 insertions(+), 67 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 9067831d9223..60f95347babf 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -451,3 +451,5 @@ int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
 /* functions to support SR-IOV */
 int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
 uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
+int lpfc_sli4_queue_create(struct lpfc_hba *);
+void lpfc_sli4_queue_destroy(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 99bc2bb1987f..907c94b9245d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -58,8 +58,7 @@ spinlock_t _dump_buf_lock;
 
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
-static int lpfc_sli4_queue_create(struct lpfc_hba *);
-static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+static int lpfc_sli4_queue_verify(struct lpfc_hba *);
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
 static int lpfc_sli4_read_config(struct lpfc_hba *);
@@ -4493,15 +4492,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		}
 	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
-	/* Create all the SLI4 queues */
-	rc = lpfc_sli4_queue_create(phba);
+	/* Verify all the SLI4 queues */
+	rc = lpfc_sli4_queue_verify(phba);
 	if (rc)
 		goto out_free_bsmbx;
 
 	/* Create driver internal CQE event pool */
 	rc = lpfc_sli4_cq_event_pool_create(phba);
 	if (rc)
-		goto out_destroy_queue;
+		goto out_free_bsmbx;
 
 	/* Initialize and populate the iocb list per host */
 	rc = lpfc_init_sgl_list(phba);
@@ -4535,14 +4534,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_remove_rpi_hdrs;
 	}
 
-	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+	/*
+	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
+	 * interrupt vector. This is not an error
+	 */
+	if (phba->cfg_fcp_eq_count) {
+		phba->sli4_hba.fcp_eq_hdl =
+			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				phba->cfg_fcp_eq_count), GFP_KERNEL);
-	if (!phba->sli4_hba.fcp_eq_hdl) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2572 Failed allocate memory for fast-path "
-				"per-EQ handle array\n");
-		rc = -ENOMEM;
-		goto out_free_fcf_rr_bmask;
+		if (!phba->sli4_hba.fcp_eq_hdl) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2572 Failed allocate memory for "
+				"fast-path per-EQ handle array\n");
+			rc = -ENOMEM;
+			goto out_free_fcf_rr_bmask;
+		}
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -4586,8 +4592,6 @@ out_free_sgl_list:
 	lpfc_free_sgl_list(phba);
 out_destroy_cq_event_pool:
 	lpfc_sli4_cq_event_pool_destroy(phba);
-out_destroy_queue:
-	lpfc_sli4_queue_destroy(phba);
 out_free_bsmbx:
 	lpfc_destroy_bootstrap_mbox(phba);
 out_free_mem:
@@ -4627,9 +4631,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	/* Free the SCSI sgl management array */
 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
 
-	/* Free the SLI4 queues */
-	lpfc_sli4_queue_destroy(phba);
-
 	/* Free the completion queue EQ event pool */
 	lpfc_sli4_cq_event_release_all(phba);
 	lpfc_sli4_cq_event_pool_destroy(phba);
@@ -6158,24 +6159,21 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
+ * This routine is invoked to check the user settable queue counts for EQs and
+ * CQs. after this routine is called the counts will be set to valid values that
+ * adhere to the constraints of the system's interrupt vectors and the port's
+ * queue resources.
  *
  * Return codes
  *      0 - successful
  *      -ENOMEM - No available memory
- *      -EIO - The mailbox failed to complete successfully.
  **/
 static int
-lpfc_sli4_queue_create(struct lpfc_hba *phba)
+lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-	struct lpfc_queue *qdesc;
-	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
 	int cfg_fcp_wq_count;
 	int cfg_fcp_eq_count;
 
@@ -6248,14 +6246,43 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	/* The overall number of event queues used */
 	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
 
-	/*
-	 * Create Event Queues (EQs)
-	 */
-
 	/* Get EQ depth from module parameter, fake the default for now */
 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
 
+	/* Get CQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+	return 0;
+out_error:
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ *      0 - sucessful
+ *      -ENOMEM - No availble memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+
+	/*
+	 * Create Event Queues (EQs)
+	 */
+
 	/* Create slow path event queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 				      phba->sli4_hba.eq_ecount);
@@ -6266,14 +6293,20 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	}
 	phba->sli4_hba.sp_eq = qdesc;
 
-	/* Create fast-path FCP Event Queue(s) */
-	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
-			       phba->cfg_fcp_eq_count), GFP_KERNEL);
-	if (!phba->sli4_hba.fp_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2576 Failed allocate memory for fast-path "
-				"EQ record array\n");
-		goto out_free_sp_eq;
+	/*
+	 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
+	 * zero whenever there is exactly one interrupt vector. This is not
+	 * an error.
+	 */
+	if (phba->cfg_fcp_eq_count) {
+		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+				       phba->cfg_fcp_eq_count), GFP_KERNEL);
+		if (!phba->sli4_hba.fp_eq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2576 Failed allocate memory for "
+				"fast-path EQ record array\n");
+			goto out_free_sp_eq;
+		}
 	}
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
@@ -6290,10 +6323,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	 * Create Complete Queues (CQs)
 	 */
 
-	/* Get CQ depth from module parameter, fake the default for now */
-	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
-	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
-
 	/* Create slow-path Mailbox Command Complete Queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
 				      phba->sli4_hba.cq_ecount);
@@ -6315,16 +6344,25 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	phba->sli4_hba.els_cq = qdesc;
 
 
-	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
-	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
-				phba->cfg_fcp_eq_count), GFP_KERNEL);
+	/*
+	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
+	 * If there are no FCP EQs then create exactly one FCP CQ.
+	 */
+	if (phba->cfg_fcp_eq_count)
+		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+						phba->cfg_fcp_eq_count),
+						GFP_KERNEL);
+	else
+		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2577 Failed allocate memory for fast-path "
 				"CQ record array\n");
 		goto out_free_els_cq;
 	}
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+	fcp_cqidx = 0;
+	do {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
 					      phba->sli4_hba.cq_ecount);
 		if (!qdesc) {
@@ -6334,7 +6372,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 			goto out_free_fcp_cq;
 		}
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
-	}
+	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
 
 	/* Create Mailbox Command Queue */
 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
@@ -6466,7 +6504,7 @@ out_error:
  *      -ENOMEM - No available memory
  *      -EIO - The mailbox failed to complete successfully.
  **/
-static void
+void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
 	int fcp_qidx;
@@ -6814,8 +6852,10 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 	/* Unset FCP response complete queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+	fcp_qidx = 0;
+	do {
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	/* Unset fast-path event queue */
 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
@@ -7995,6 +8035,7 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
 
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
 
 	return;
 }
@@ -8108,6 +8149,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
 
 	/* Stop the SLI4 device port */
 	phba->pport->work_port_events = 0;
@@ -9008,7 +9050,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
 	int adjusted_fcp_eq_count;
-	int fcp_qidx;
 	const struct firmware *fw;
 	uint8_t file_name[16];
 
@@ -9117,16 +9158,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
 	else
 		adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
-	/* Free unused EQs */
-	for (fcp_qidx = adjusted_fcp_eq_count;
-	     fcp_qidx < phba->cfg_fcp_eq_count;
-	     fcp_qidx++) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
-		/* do not delete the first fcp_cq */
-		if (fcp_qidx)
-			lpfc_sli4_queue_free(
-				phba->sli4_hba.fcp_cq[fcp_qidx]);
-	}
 	phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
 	/* Set up SLI-4 HBA */
 	if (lpfc_sli4_hba_setup(phba)) {
@@ -9309,6 +9340,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 
 	/* Disable interrupt from device */
 	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
 
 	/* Save device state to PCI config space */
 	pci_save_state(pdev);
@@ -9438,6 +9470,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
 
 	/* Disable interrupt and pci device */
 	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
 	pci_disable_device(phba->pcidev);
 
 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b9edfebf5091..c430aada02be 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6127,12 +6127,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		goto out_free_mbox;
 	}
 
+	/* Create all the SLI4 queues */
+	rc = lpfc_sli4_queue_create(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3089 Failed to allocate queues\n");
+		rc = -ENODEV;
+		goto out_stop_timers;
+	}
 	/* Set up all the queues to the device */
 	rc = lpfc_sli4_queue_setup(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 				"0381 Error %d during queue setup.\n ", rc);
-		goto out_stop_timers;
+		goto out_destroy_queue;
 	}
 
 	/* Arm the CQs and then EQs on device */
@@ -6205,15 +6213,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
-	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+		if (rc)
+			goto out_unset_queue;
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
 out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
-	if (rc)
-		lpfc_sli4_queue_unset(phba);
+	lpfc_sli4_queue_unset(phba);
+out_destroy_queue:
+	lpfc_sli4_queue_destroy(phba);
 out_stop_timers:
-	if (rc)
-		lpfc_stop_hba_timers(phba);
+	lpfc_stop_hba_timers(phba);
 out_free_mbox:
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return rc;
@@ -9562,7 +9575,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 
 	/* now issue the command */
 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
-
 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
 		wait_event_interruptible_timeout(done_q,
 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
@@ -11319,6 +11331,8 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
 
 	/* Get to the EQ struct associated with this vector */
 	speq = phba->sli4_hba.sp_eq;
+	if (unlikely(!speq))
+		return IRQ_NONE;
 
 	/* Check device state for handling interrupt */
 	if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11396,6 +11410,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 
 	if (unlikely(!phba))
 		return IRQ_NONE;
+	if (unlikely(!phba->sli4_hba.fp_eq))
+		return IRQ_NONE;
 
 	/* Get to the EQ struct associated with this vector */
 	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];