author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 22:01:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 22:01:32 -0400
commit	3151367f8778a1789d6f6e6f6c642681b6cd6d64 (patch)
tree	1869d5429a25abd994ae94079808b8db060ec6f3 /drivers/scsi/lpfc/lpfc_init.c
parent	16642a2e7be23bbda013fc32d8f6c68982eab603 (diff)
parent	fe709ed827d370e6b0c0a9f9456da1c22bdcd118 (diff)
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "This is a large set of updates, mostly for drivers (qla2xxx [including
  support for new 83xx based card], qla4xxx, mpt2sas, bfa, zfcp, hpsa,
  be2iscsi, isci, lpfc, ipr, ibmvfc, ibmvscsi, megaraid_sas).

  There's also a rework for tape adding virtually unlimited numbers of
  tape drives plus a set of dif fixes for sd and a fix for a live lock
  on hot remove of SCSI devices.

  This round includes a signed tag pull of isci-for-3.6

  Signed-off-by: James Bottomley <JBottomley@Parallels.com>"

Fix up trivial conflict in drivers/scsi/qla2xxx/qla_nx.c due to new PCI
helper function use in a function that was removed by this pull.

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (198 commits)
  [SCSI] st: remove st_mutex
  [SCSI] sd: Ensure we correctly disable devices with unknown protection type
  [SCSI] hpsa: gen8plus Smart Array IDs
  [SCSI] qla4xxx: Update driver version to 5.03.00-k1
  [SCSI] qla4xxx: Disable generating pause frames for ISP83XX
  [SCSI] qla4xxx: Fix double clearing of risc_intr for ISP83XX
  [SCSI] qla4xxx: IDC implementation for Loopback
  [SCSI] qla4xxx: update copyrights in LICENSE.qla4xxx
  [SCSI] qla4xxx: Fix panic while rmmod
  [SCSI] qla4xxx: Fail probe_adapter if IRQ allocation fails
  [SCSI] qla4xxx: Prevent MSI/MSI-X falling back to INTx for ISP82XX
  [SCSI] qla4xxx: Update idc reg in case of PCI AER
  [SCSI] qla4xxx: Fix double IDC locking in qla4_8xxx_error_recovery
  [SCSI] qla4xxx: Clear interrupt while unloading driver for ISP83XX
  [SCSI] qla4xxx: Print correct IDC version
  [SCSI] qla4xxx: Added new mbox cmd to pass driver version to FW
  [SCSI] scsi_dh_alua: Enable STPG for unavailable ports
  [SCSI] scsi_remove_target: fix softlockup regression on hot remove
  [SCSI] ibmvscsi: Fix host config length field overflow
  [SCSI] ibmvscsi: Remove backend abstraction
  ...
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	839
1 file changed, 408 insertions(+), 431 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 628a703abdd..8a55a586dd6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -480,11 +480,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	phba->link_state = LPFC_LINK_DOWN;
 
 	/* Only process IOCBs on ELS ring till hba_state is READY */
-	if (psli->ring[psli->extra_ring].cmdringaddr)
+	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
-	if (psli->ring[psli->fcp_ring].cmdringaddr)
+	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
-	if (psli->ring[psli->next_ring].cmdringaddr)
+	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
 		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
 
 	/* Post receive buffers for desired rings */
@@ -2059,6 +2059,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		oneConnect = 1;
 		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
 		break;
+	case PCI_DEVICE_ID_SKYHAWK:
+	case PCI_DEVICE_ID_SKYHAWK_VF:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+		break;
 	default:
 		m = (typeof(m)){"Unknown", "", ""};
 		break;
@@ -4546,6 +4551,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 		phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
 	}
 
+	if (!phba->sli.ring)
+		phba->sli.ring = (struct lpfc_sli_ring *)
+			kzalloc(LPFC_SLI3_MAX_RING *
+			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+	if (!phba->sli.ring)
+		return -ENOMEM;
+
 	/*
 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4690,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
+
+	/* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
+	phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
+
 	/* This will be set to correct value after the read_config mbox */
 	phba->max_vports = 0;
 
@@ -4705,6 +4721,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	sges_per_segment = 2;
 
 	/*
+	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
+	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
+	 */
+	if (!phba->sli.ring)
+		phba->sli.ring = kzalloc(
+			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
+			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+	if (!phba->sli.ring)
+		return -ENOMEM;
+	/*
 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
 	 * 2 segments are added since the IOCB needs a command and response bde.
@@ -4909,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_remove_rpi_hdrs;
 	}
 
-	/*
-	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
-	 * interrupt vector. This is not an error
-	 */
-	if (phba->cfg_fcp_eq_count) {
-		phba->sli4_hba.fcp_eq_hdl =
-			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
-			phba->cfg_fcp_eq_count), GFP_KERNEL);
-		if (!phba->sli4_hba.fcp_eq_hdl) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2572 Failed allocate memory for "
-					"fast-path per-EQ handle array\n");
-			rc = -ENOMEM;
-			goto out_free_fcf_rr_bmask;
-		}
+	phba->sli4_hba.fcp_eq_hdl =
+			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+			phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_eq_hdl) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2572 Failed allocate memory for "
+				"fast-path per-EQ handle array\n");
+		rc = -ENOMEM;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -5550,6 +5570,10 @@ lpfc_hba_free(struct lpfc_hba *phba)
 	/* Release the driver assigned board number */
 	idr_remove(&lpfc_hba_index, phba->brd_no);
 
+	/* Free memory allocated with sli rings */
+	kfree(phba->sli.ring);
+	phba->sli.ring = NULL;
+
 	kfree(phba);
 	return;
 }
@@ -6275,8 +6299,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	uint32_t shdr_status, shdr_add_status;
 	struct lpfc_mbx_get_func_cfg *get_func_cfg;
 	struct lpfc_rsrc_desc_fcfcoe *desc;
+	char *pdesc_0;
 	uint32_t desc_count;
-	int length, i, rc = 0;
+	int length, i, rc = 0, rc2;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
@@ -6388,18 +6413,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
 			 length, LPFC_SLI4_MBX_EMBED);
 
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 	shdr = (union lpfc_sli4_cfg_shdr *)
 				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc || shdr_status || shdr_add_status) {
+	if (rc2 || shdr_status || shdr_add_status) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3026 Mailbox failed , mbxCmd x%x "
 				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
 				bf_get(lpfc_mqe_command, &pmb->u.mqe),
 				bf_get(lpfc_mqe_status, &pmb->u.mqe));
-		rc = -EIO;
 		goto read_cfg_out;
 	}
 
@@ -6407,11 +6431,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
 	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
 
+	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
+	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
+	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
+	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
+		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
+	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
+		goto read_cfg_out;
+
 	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
-		desc = (struct lpfc_rsrc_desc_fcfcoe *)
-			&get_func_cfg->func_cfg.desc[i];
+		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
 		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
-		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
 			phba->sli4_hba.iov.pf_number =
 				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
 			phba->sli4_hba.iov.vf_number =
@@ -6425,13 +6456,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6425 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6456 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6426 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6457 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6427 phba->sli4_hba.iov.vf_number); 6458 phba->sli4_hba.iov.vf_number);
6428 else { 6459 else
6429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6460 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6430 "3028 GET_FUNCTION_CONFIG: failed to find " 6461 "3028 GET_FUNCTION_CONFIG: failed to find "
6431 "Resrouce Descriptor:x%x\n", 6462 "Resrouce Descriptor:x%x\n",
6432 LPFC_RSRC_DESC_TYPE_FCFCOE); 6463 LPFC_RSRC_DESC_TYPE_FCFCOE);
6433 rc = -EIO;
6434 }
6435 6464
6436read_cfg_out: 6465read_cfg_out:
6437 mempool_free(pmb, phba->mbox_mem_pool); 6466 mempool_free(pmb, phba->mbox_mem_pool);
@@ -6512,53 +6541,40 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
 static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-	int cfg_fcp_wq_count;
-	int cfg_fcp_eq_count;
+	int cfg_fcp_io_channel;
+	uint32_t cpu;
+	uint32_t i = 0;
+
 
 	/*
-	 * Sanity check for confiugred queue parameters against the run-time
+	 * Sanity check for configured queue parameters against the run-time
 	 * device parameters
 	 */
 
-	/* Sanity check on FCP fast-path WQ parameters */
-	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
-	if (cfg_fcp_wq_count >
-	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
-		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
-			LPFC_SP_WQN_DEF;
-		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2581 Not enough WQs (%d) from "
-					"the pci function for supporting "
-					"FCP WQs (%d)\n",
-					phba->sli4_hba.max_cfg_param.max_wq,
-					phba->cfg_fcp_wq_count);
-			goto out_error;
-		}
+	/* Sanity check on HBA EQ parameters */
+	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
+
+	/* It doesn't make sense to have more io channels then CPUs */
+	for_each_online_cpu(cpu) {
+		i++;
+	}
+	if (i < cfg_fcp_io_channel) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2582 Not enough WQs (%d) from the pci "
-				"function for supporting the requested "
-				"FCP WQs (%d), the actual FCP WQs can "
-				"be supported: %d\n",
-				phba->sli4_hba.max_cfg_param.max_wq,
-				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
-	}
-	/* The actual number of FCP work queues adopted */
-	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
-
-	/* Sanity check on FCP fast-path EQ parameters */
-	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
-	if (cfg_fcp_eq_count >
-	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
-		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
-			LPFC_SP_EQN_DEF;
-		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+				"3188 Reducing IO channels to match number of "
+				"CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+		cfg_fcp_io_channel = i;
+	}
+
+	if (cfg_fcp_io_channel >
+	    phba->sli4_hba.max_cfg_param.max_eq) {
+		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+		if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"2574 Not enough EQs (%d) from the "
 					"pci function for supporting FCP "
 					"EQs (%d)\n",
 					phba->sli4_hba.max_cfg_param.max_eq,
-					phba->cfg_fcp_eq_count);
+					phba->cfg_fcp_io_channel);
 			goto out_error;
 		}
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6567,22 +6583,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6567 "FCP EQs (%d), the actual FCP EQs can " 6583 "FCP EQs (%d), the actual FCP EQs can "
6568 "be supported: %d\n", 6584 "be supported: %d\n",
6569 phba->sli4_hba.max_cfg_param.max_eq, 6585 phba->sli4_hba.max_cfg_param.max_eq,
6570 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6586 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6571 }
6572 /* It does not make sense to have more EQs than WQs */
6573 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6574 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6575 "2593 The FCP EQ count(%d) cannot be greater "
6576 "than the FCP WQ count(%d), limiting the "
6577 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6578 phba->cfg_fcp_wq_count,
6579 phba->cfg_fcp_wq_count);
6580 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6581 } 6587 }
6588
6589 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
6590
6582 /* The actual number of FCP event queues adopted */ 6591 /* The actual number of FCP event queues adopted */
6583 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6592 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6584 /* The overall number of event queues used */ 6593 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6585 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6594 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6595 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6586 6596
6587 /* Get EQ depth from module parameter, fake the default for now */ 6597 /* Get EQ depth from module parameter, fake the default for now */
6588 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6598 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -6615,50 +6625,104 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
 	struct lpfc_queue *qdesc;
-	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+	int idx;
 
 	/*
-	 * Create Event Queues (EQs)
+	 * Create HBA Record arrays.
 	 */
+	if (!phba->cfg_fcp_io_channel)
+		return -ERANGE;
 
-	/* Create slow path event queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
-				      phba->sli4_hba.eq_ecount);
-	if (!qdesc) {
+	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.hba_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2576 Failed allocate memory for "
+			"fast-path EQ record array\n");
+		goto out_error;
+	}
+
+	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0496 Failed allocate slow-path EQ\n");
+				"2577 Failed allocate memory for fast-path "
+				"CQ record array\n");
+		goto out_error;
+	}
+
+	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2578 Failed allocate memory for fast-path "
+				"WQ record array\n");
 		goto out_error;
 	}
-	phba->sli4_hba.sp_eq = qdesc;
 
 	/*
-	 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
-	 * zero whenever there is exactly one interrupt vector. This is not
-	 * an error.
+	 * Since the first EQ can have multiple CQs associated with it,
+	 * this array is used to quickly see if we have a FCP fast-path
+	 * CQ match.
 	 */
-	if (phba->cfg_fcp_eq_count) {
-		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
-				       phba->cfg_fcp_eq_count), GFP_KERNEL);
-		if (!phba->sli4_hba.fp_eq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2576 Failed allocate memory for "
-					"fast-path EQ record array\n");
-			goto out_free_sp_eq;
-		}
+	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+					 phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq_map) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2545 Failed allocate memory for fast-path "
+				"CQ map\n");
+		goto out_error;
 	}
-	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+
+	/*
+	 * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies
+	 * how many EQs to create.
+	 */
+	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+		/* Create EQs */
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 					      phba->sli4_hba.eq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0497 Failed allocate fast-path EQ\n");
-			goto out_free_fp_eq;
+					"0497 Failed allocate EQ (%d)\n", idx);
+			goto out_error;
 		}
-		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+		phba->sli4_hba.hba_eq[idx] = qdesc;
+
+		/* Create Fast Path FCP CQs */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0499 Failed allocate fast-path FCP "
+					"CQ (%d)\n", idx);
+			goto out_error;
+		}
+		phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+		/* Create Fast Path FCP WQs */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0503 Failed allocate fast-path FCP "
+					"WQ (%d)\n", idx);
+			goto out_error;
+		}
+		phba->sli4_hba.fcp_wq[idx] = qdesc;
 	}
 
+
 	/*
-	 * Create Complete Queues (CQs)
+	 * Create Slow Path Completion Queues (CQs)
 	 */
 
 	/* Create slow-path Mailbox Command Complete Queue */
@@ -6667,7 +6731,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0500 Failed allocate slow-path mailbox CQ\n");
-		goto out_free_fp_eq;
+		goto out_error;
 	}
 	phba->sli4_hba.mbx_cq = qdesc;
 
@@ -6677,59 +6741,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0501 Failed allocate slow-path ELS CQ\n");
-		goto out_free_mbx_cq;
+		goto out_error;
 	}
 	phba->sli4_hba.els_cq = qdesc;
 
 
 	/*
-	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
-	 * If there are no FCP EQs then create exactly one FCP CQ.
+	 * Create Slow Path Work Queues (WQs)
 	 */
-	if (phba->cfg_fcp_eq_count)
-		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
-						phba->cfg_fcp_eq_count),
-						GFP_KERNEL);
-	else
-		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
-						GFP_KERNEL);
-	if (!phba->sli4_hba.fcp_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2577 Failed allocate memory for fast-path "
-				"CQ record array\n");
-		goto out_free_els_cq;
-	}
-	fcp_cqidx = 0;
-	do {
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-					      phba->sli4_hba.cq_ecount);
-		if (!qdesc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0499 Failed allocate fast-path FCP "
-					"CQ (%d)\n", fcp_cqidx);
-			goto out_free_fcp_cq;
-		}
-		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
-	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
 
 	/* Create Mailbox Command Queue */
-	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
-	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
 
 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
 				      phba->sli4_hba.mq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0505 Failed allocate slow-path MQ\n");
-		goto out_free_fcp_cq;
+		goto out_error;
 	}
 	phba->sli4_hba.mbx_wq = qdesc;
 
 	/*
-	 * Create all the Work Queues (WQs)
+	 * Create ELS Work Queues
 	 */
-	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
-	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
 
 	/* Create slow-path ELS Work Queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6737,36 +6771,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0504 Failed allocate slow-path ELS WQ\n");
-		goto out_free_mbx_wq;
+		goto out_error;
 	}
 	phba->sli4_hba.els_wq = qdesc;
 
-	/* Create fast-path FCP Work Queue(s) */
-	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
-				phba->cfg_fcp_wq_count), GFP_KERNEL);
-	if (!phba->sli4_hba.fcp_wq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2578 Failed allocate memory for fast-path "
-				"WQ record array\n");
-		goto out_free_els_wq;
-	}
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
-					      phba->sli4_hba.wq_ecount);
-		if (!qdesc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0503 Failed allocate fast-path FCP "
-					"WQ (%d)\n", fcp_wqidx);
-			goto out_free_fcp_wq;
-		}
-		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
-	}
-
 	/*
 	 * Create Receive Queue (RQ)
 	 */
-	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
-	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
 
 	/* Create Receive Queue for header */
 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6774,7 +6785,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0506 Failed allocate receive HRQ\n");
-		goto out_free_fcp_wq;
+		goto out_error;
 	}
 	phba->sli4_hba.hdr_rq = qdesc;
 
@@ -6784,52 +6795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0507 Failed allocate receive DRQ\n");
-		goto out_free_hdr_rq;
+		goto out_error;
 	}
 	phba->sli4_hba.dat_rq = qdesc;
 
 	return 0;
 
-out_free_hdr_rq:
-	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-	phba->sli4_hba.hdr_rq = NULL;
-out_free_fcp_wq:
-	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
-		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
-	}
-	kfree(phba->sli4_hba.fcp_wq);
-	phba->sli4_hba.fcp_wq = NULL;
-out_free_els_wq:
-	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-	phba->sli4_hba.els_wq = NULL;
-out_free_mbx_wq:
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-	phba->sli4_hba.mbx_wq = NULL;
-out_free_fcp_cq:
-	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
-		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
-	}
-	kfree(phba->sli4_hba.fcp_cq);
-	phba->sli4_hba.fcp_cq = NULL;
-out_free_els_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-	phba->sli4_hba.els_cq = NULL;
-out_free_mbx_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-	phba->sli4_hba.mbx_cq = NULL;
-out_free_fp_eq:
-	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
-		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
-	}
-	kfree(phba->sli4_hba.fp_eq);
-	phba->sli4_hba.fp_eq = NULL;
-out_free_sp_eq:
-	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-	phba->sli4_hba.sp_eq = NULL;
 out_error:
+	lpfc_sli4_queue_destroy(phba);
 	return -ENOMEM;
 }
 
@@ -6848,58 +6821,86 @@ out_error:
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
-	int fcp_qidx;
+	int idx;
+
+	if (phba->sli4_hba.hba_eq != NULL) {
+		/* Release HBA event queue */
+		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+			if (phba->sli4_hba.hba_eq[idx] != NULL) {
+				lpfc_sli4_queue_free(
+					phba->sli4_hba.hba_eq[idx]);
+				phba->sli4_hba.hba_eq[idx] = NULL;
+			}
+		}
+		kfree(phba->sli4_hba.hba_eq);
+		phba->sli4_hba.hba_eq = NULL;
+	}
+
+	if (phba->sli4_hba.fcp_cq != NULL) {
+		/* Release FCP completion queue */
+		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
+				lpfc_sli4_queue_free(
+					phba->sli4_hba.fcp_cq[idx]);
+				phba->sli4_hba.fcp_cq[idx] = NULL;
+			}
+		}
+		kfree(phba->sli4_hba.fcp_cq);
+		phba->sli4_hba.fcp_cq = NULL;
+	}
+
+	if (phba->sli4_hba.fcp_wq != NULL) {
+		/* Release FCP work queue */
+		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
+				lpfc_sli4_queue_free(
+					phba->sli4_hba.fcp_wq[idx]);
+				phba->sli4_hba.fcp_wq[idx] = NULL;
+			}
+		}
+		kfree(phba->sli4_hba.fcp_wq);
+		phba->sli4_hba.fcp_wq = NULL;
+	}
+
+	/* Release FCP CQ mapping array */
+	if (phba->sli4_hba.fcp_cq_map != NULL) {
+		kfree(phba->sli4_hba.fcp_cq_map);
+		phba->sli4_hba.fcp_cq_map = NULL;
+	}
 
 	/* Release mailbox command work queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-	phba->sli4_hba.mbx_wq = NULL;
+	if (phba->sli4_hba.mbx_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+		phba->sli4_hba.mbx_wq = NULL;
+	}
 
 	/* Release ELS work queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-	phba->sli4_hba.els_wq = NULL;
-
-	/* Release FCP work queue */
-	if (phba->sli4_hba.fcp_wq != NULL)
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-		     fcp_qidx++)
-			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
-	kfree(phba->sli4_hba.fcp_wq);
-	phba->sli4_hba.fcp_wq = NULL;
+	if (phba->sli4_hba.els_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+		phba->sli4_hba.els_wq = NULL;
+	}
 
 	/* Release unsolicited receive queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-	phba->sli4_hba.hdr_rq = NULL;
-	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
-	phba->sli4_hba.dat_rq = NULL;
+	if (phba->sli4_hba.hdr_rq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+		phba->sli4_hba.hdr_rq = NULL;
+	}
+	if (phba->sli4_hba.dat_rq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+		phba->sli4_hba.dat_rq = NULL;
+	}
 
 	/* Release ELS complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-	phba->sli4_hba.els_cq = NULL;
+	if (phba->sli4_hba.els_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+		phba->sli4_hba.els_cq = NULL;
+	}
 
 	/* Release mailbox command complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-	phba->sli4_hba.mbx_cq = NULL;
-
-	/* Release FCP response complete queue */
-	fcp_qidx = 0;
-	if (phba->sli4_hba.fcp_cq != NULL)
-		do
-			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-		while (++fcp_qidx < phba->cfg_fcp_eq_count);
-	kfree(phba->sli4_hba.fcp_cq);
-	phba->sli4_hba.fcp_cq = NULL;
-
-	/* Release fast-path event queue */
-	if (phba->sli4_hba.fp_eq != NULL)
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
-		     fcp_qidx++)
-			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
-	kfree(phba->sli4_hba.fp_eq);
-	phba->sli4_hba.fp_eq = NULL;
-
-	/* Release slow-path event queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-	phba->sli4_hba.sp_eq = NULL;
+	if (phba->sli4_hba.mbx_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+		phba->sli4_hba.mbx_cq = NULL;
+	}
 
 	return;
 }
@@ -6919,61 +6920,124 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 int
 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 {
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
 	int rc = -ENOMEM;
 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
 	int fcp_cq_index = 0;
 
 	/*
-	 * Set up Event Queues (EQs)
+	 * Set up HBA Event Queues (EQs)
 	 */
 
-	/* Set up slow-path event queue */
-	if (!phba->sli4_hba.sp_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0520 Slow-path EQ not allocated\n");
-		goto out_error;
-	}
-	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
-			    LPFC_SP_DEF_IMAX);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0521 Failed setup of slow-path EQ: "
-				"rc = 0x%x\n", rc);
-		goto out_error;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2583 Slow-path EQ setup: queue-id=%d\n",
-			phba->sli4_hba.sp_eq->queue_id);
-
-	/* Set up fast-path event queue */
-	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+	/* Set up HBA event queue */
+	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3147 Fast-path EQs not allocated\n");
 		rc = -ENOMEM;
-		goto out_destroy_sp_eq;
+		goto out_error;
 	}
-	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
-		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0522 Fast-path EQ (%d) not "
 					"allocated\n", fcp_eqidx);
 			rc = -ENOMEM;
-			goto out_destroy_fp_eq;
+			goto out_destroy_hba_eq;
 		}
-		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
-				    phba->cfg_fcp_imax);
+		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
+			 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0523 Failed setup of fast-path EQ "
 					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
-			goto out_destroy_fp_eq;
+			goto out_destroy_hba_eq;
 		}
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"2584 Fast-path EQ setup: "
+				"2584 HBA EQ setup: "
 				"queue[%d]-id=%d\n", fcp_eqidx,
-				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+				phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
 	}
 
+	/* Set up fast-path FCP Response Complete Queue */
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP CQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_hba_eq;
+	}
+
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
+		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0526 Fast-path FCP CQ (%d) not "
+					"allocated\n", fcp_cqidx);
+			rc = -ENOMEM;
+			goto out_destroy_fcp_cq;
+		}
+		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+			phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0527 Failed setup of fast-path FCP "
+					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+			goto out_destroy_fcp_cq;
+		}
+
+		/* Setup fcp_cq_map for fast lookup */
+		phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
+				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2588 FCP CQ setup: cq[%d]-id=%d, "
+				"parent seq[%d]-id=%d\n",
+				fcp_cqidx,
+				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+				fcp_cqidx,
+				phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
+	}
+
+	/* Set up fast-path FCP Work Queue */
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3149 Fast-path FCP WQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_fcp_cq;
+	}
+
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
+		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0534 Fast-path FCP WQ (%d) not "
+					"allocated\n", fcp_wqidx);
+			rc = -ENOMEM;
+			goto out_destroy_fcp_wq;
+		}
+		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+				    phba->sli4_hba.fcp_cq[fcp_wqidx],
+				    LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0535 Failed setup of fast-path FCP "
+					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+			goto out_destroy_fcp_wq;
+		}
+
+		/* Bind this WQ to the next FCP ring */
+		pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
+		pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
+		phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2591 FCP WQ setup: wq[%d]-id=%d, "
+				"parent cq[%d]-id=%d\n",
+				fcp_wqidx,
+				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+				fcp_cq_index,
+				phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+	}
 	/*
 	 * Set up Complete Queues (CQs)
 	 */
@@ -6983,20 +7047,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0528 Mailbox CQ not allocated\n");
 		rc = -ENOMEM;
-		goto out_destroy_fp_eq;
+		goto out_destroy_fcp_wq;
 	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
-			    LPFC_MCQ, LPFC_MBOX);
+	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
+			phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0529 Failed setup of slow-path mailbox CQ: "
 				"rc = 0x%x\n", rc);
-		goto out_destroy_fp_eq;
+		goto out_destroy_fcp_wq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
 			phba->sli4_hba.mbx_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
+			phba->sli4_hba.hba_eq[0]->queue_id);
 
 	/* Set up slow-path ELS Complete Queue */
 	if (!phba->sli4_hba.els_cq) {
@@ -7005,8 +7069,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		rc = -ENOMEM;
 		goto out_destroy_mbx_cq;
 	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
-			    LPFC_WCQ, LPFC_ELS);
+	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
+			phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0531 Failed setup of slow-path ELS CQ: "
@@ -7016,52 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
 			phba->sli4_hba.els_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
-
-	/* Set up fast-path FCP Response Complete Queue */
-	if (!phba->sli4_hba.fcp_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"3148 Fast-path FCP CQ array not "
-				"allocated\n");
-		rc = -ENOMEM;
-		goto out_destroy_els_cq;
-	}
-	fcp_cqidx = 0;
-	do {
-		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0526 Fast-path FCP CQ (%d) not "
-					"allocated\n", fcp_cqidx);
-			rc = -ENOMEM;
-			goto out_destroy_fcp_cq;
-		}
-		if (phba->cfg_fcp_eq_count)
-			rc = lpfc_cq_create(phba,
-					    phba->sli4_hba.fcp_cq[fcp_cqidx],
-					    phba->sli4_hba.fp_eq[fcp_cqidx],
-					    LPFC_WCQ, LPFC_FCP);
-		else
-			rc = lpfc_cq_create(phba,
-					    phba->sli4_hba.fcp_cq[fcp_cqidx],
-					    phba->sli4_hba.sp_eq,
-					    LPFC_WCQ, LPFC_FCP);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0527 Failed setup of fast-path FCP "
-					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
-			goto out_destroy_fcp_cq;
-		}
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"2588 FCP CQ setup: cq[%d]-id=%d, "
-				"parent %seq[%d]-id=%d\n",
-				fcp_cqidx,
-				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
-				(phba->cfg_fcp_eq_count) ? "" : "sp_",
-				fcp_cqidx,
-				(phba->cfg_fcp_eq_count) ?
-				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
-				phba->sli4_hba.sp_eq->queue_id);
-	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+			phba->sli4_hba.hba_eq[0]->queue_id);
 
 	/*
 	 * Set up all the Work Queues (WQs)
@@ -7072,7 +7091,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0538 Slow-path MQ not allocated\n");
 		rc = -ENOMEM;
-		goto out_destroy_fcp_cq;
+		goto out_destroy_els_cq;
 	}
 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7080,7 +7099,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0539 Failed setup of slow-path MQ: "
 				"rc = 0x%x\n", rc);
-		goto out_destroy_fcp_cq;
+		goto out_destroy_els_cq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7102,49 +7121,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7102 "rc = 0x%x\n", rc); 7121 "rc = 0x%x\n", rc);
7103 goto out_destroy_mbx_wq; 7122 goto out_destroy_mbx_wq;
7104 } 7123 }
7124
7125 /* Bind this WQ to the ELS ring */
7126 pring = &psli->ring[LPFC_ELS_RING];
7127 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7128 phba->sli4_hba.els_cq->pring = pring;
7129
7105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7106 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7131 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7107 phba->sli4_hba.els_wq->queue_id, 7132 phba->sli4_hba.els_wq->queue_id,
7108 phba->sli4_hba.els_cq->queue_id); 7133 phba->sli4_hba.els_cq->queue_id);
7109 7134
7110 /* Set up fast-path FCP Work Queue */
7111 if (!phba->sli4_hba.fcp_wq) {
7112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7113 "3149 Fast-path FCP WQ array not "
7114 "allocated\n");
7115 rc = -ENOMEM;
7116 goto out_destroy_els_wq;
7117 }
7118 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
7119 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7121 "0534 Fast-path FCP WQ (%d) not "
7122 "allocated\n", fcp_wqidx);
7123 rc = -ENOMEM;
7124 goto out_destroy_fcp_wq;
7125 }
7126 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7127 phba->sli4_hba.fcp_cq[fcp_cq_index],
7128 LPFC_FCP);
7129 if (rc) {
7130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7131 "0535 Failed setup of fast-path FCP "
7132 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7133 goto out_destroy_fcp_wq;
7134 }
7135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7136 "2591 FCP WQ setup: wq[%d]-id=%d, "
7137 "parent cq[%d]-id=%d\n",
7138 fcp_wqidx,
7139 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7140 fcp_cq_index,
7141 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
7142 /* Round robin FCP Work Queue's Completion Queue assignment */
7143 if (phba->cfg_fcp_eq_count)
7144 fcp_cq_index = ((fcp_cq_index + 1) %
7145 phba->cfg_fcp_eq_count);
7146 }
7147
7148 /* 7135 /*
7149 * Create Receive Queue (RQ) 7136 * Create Receive Queue (RQ)
7150 */ 7137 */
@@ -7152,7 +7139,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0540 Receive Queue not allocated\n");
 		rc = -ENOMEM;
-		goto out_destroy_fcp_wq;
+		goto out_destroy_els_wq;
 	}
 
 	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7175,25 +7162,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id);
 	return 0;
 
-out_destroy_fcp_wq:
-	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
-		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
 out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_fcp_cq:
-	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
-		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
 out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fp_eq:
+out_destroy_fcp_wq:
+	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_hba_eq:
 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
-		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
-out_destroy_sp_eq:
-	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
 out_error:
 	return rc;
 }
@@ -7222,27 +7207,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	/* Unset unsolicited receive queue */
 	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 	/* Unset FCP work queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+	if (phba->sli4_hba.fcp_wq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+		     fcp_qidx++)
+			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+	}
 	/* Unset mailbox command complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 	/* Unset FCP response complete queue */
 	if (phba->sli4_hba.fcp_cq) {
-		fcp_qidx = 0;
-		do {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+		     fcp_qidx++)
 			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	}
 	/* Unset fast-path event queue */
-	if (phba->sli4_hba.fp_eq) {
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+	if (phba->sli4_hba.hba_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
 		     fcp_qidx++)
-			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
 	}
-	/* Unset slow-path event queue */
-	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
 
 /**
@@ -7590,10 +7575,11 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
 	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
 	length = (sizeof(struct lpfc_mbx_nop) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
-	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
 
 	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+				 LPFC_MBOX_OPCODE_NOP, length,
+				 LPFC_SLI4_MBX_EMBED);
 		if (!phba->sli4_hba.intr_enable)
 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 		else {
@@ -8133,33 +8119,22 @@ enable_msix_vectors:
8133 "message=%d\n", index, 8119 "message=%d\n", index,
8134 phba->sli4_hba.msix_entries[index].vector, 8120 phba->sli4_hba.msix_entries[index].vector,
8135 phba->sli4_hba.msix_entries[index].entry); 8121 phba->sli4_hba.msix_entries[index].entry);
8122
8136 /* 8123 /*
8137 * Assign MSI-X vectors to interrupt handlers 8124 * Assign MSI-X vectors to interrupt handlers
8138 */ 8125 */
8139 if (vectors > 1) 8126 for (index = 0; index < vectors; index++) {
8140 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8127 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8141 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 8128 sprintf((char *)&phba->sli4_hba.handler_name[index],
8142 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8129 LPFC_DRIVER_HANDLER_NAME"%d", index);
8143 else
8144 /* All Interrupts need to be handled by one EQ */
8145 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
8146 &lpfc_sli4_intr_handler, IRQF_SHARED,
8147 LPFC_DRIVER_NAME, phba);
8148 if (rc) {
8149 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8150 "0485 MSI-X slow-path request_irq failed "
8151 "(%d)\n", rc);
8152 goto msi_fail_out;
8153 }
8154 8130
8155 /* The rest of the vector(s) are associated to fast-path handler(s) */ 8131 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8156 for (index = 1; index < vectors; index++) { 8132 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8157 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 8133 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8158 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
8159 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8134 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8160 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 8135 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8161 LPFC_FP_DRIVER_HANDLER_NAME, 8136 (char *)&phba->sli4_hba.handler_name[index],
8162 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8137 &phba->sli4_hba.fcp_eq_hdl[index]);
8163 if (rc) { 8138 if (rc) {
8164 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8139 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8165 "0486 MSI-X fast-path (%d) " 8140 "0486 MSI-X fast-path (%d) "
@@ -8173,12 +8148,9 @@ enable_msix_vectors:
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 1; index--)
-		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
-	/* free the irq already requested */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+	for (--index; index >= 0; index--)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8199,11 +8171,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+	for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
@@ -8249,7 +8219,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 		return rc;
 	}
 
-	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 	}
@@ -8329,10 +8299,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 			/* Indicate initialization to INTx mode */
 			phba->intr_type = INTx;
 			intr_mode = 0;
-			for (index = 0; index < phba->cfg_fcp_eq_count;
+			for (index = 0; index < phba->cfg_fcp_io_channel;
 			     index++) {
 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+					fcp_eq_in_use, 1);
 			}
 		}
 	}
@@ -9449,7 +9421,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	int error;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
-	int adjusted_fcp_eq_count;
+	int adjusted_fcp_io_channel;
 	const struct firmware *fw;
 	uint8_t file_name[16];
 
@@ -9552,13 +9524,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		}
 		/* Default to single EQ for non-MSI-X */
 		if (phba->intr_type != MSIX)
-			adjusted_fcp_eq_count = 0;
+			adjusted_fcp_io_channel = 1;
 		else if (phba->sli4_hba.msix_vec_nr <
-					phba->cfg_fcp_eq_count + 1)
-			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+					phba->cfg_fcp_io_channel)
+			adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
 		else
-			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
-		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+			adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+		phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
 		/* Set up SLI-4 HBA */
 		if (lpfc_sli4_hba_setup(phba)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9694,6 +9666,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 	 * buffers are released to their corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
+
 	lpfc_sli4_driver_resource_unset(phba);
 
 	/* Unmap adapter Control and Doorbell registers */
@@ -10420,6 +10393,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 