Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 545
1 file changed, 464 insertions(+), 81 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
 #include <linux/ctype.h>
 #include <linux/aer.h>
 #include <linux/slab.h>
+#include <linux/firmware.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
 	if (!lpfc_vpd_data)
 		goto out_free_mbox;
-
 	do {
 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 }
 
 /**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ *	cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * Return codes
+ *	None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+	/* If the soft name exists then update it using the service params */
+	if (vport->phba->cfg_soft_wwnn)
+		u64_to_wwn(vport->phba->cfg_soft_wwnn,
+			   vport->fc_sparam.nodeName.u.wwn);
+	if (vport->phba->cfg_soft_wwpn)
+		u64_to_wwn(vport->phba->cfg_soft_wwpn,
+			   vport->fc_sparam.portName.u.wwn);
+
+	/*
+	 * If the name is empty or there exists a soft name
+	 * then copy the service params name, otherwise use the fc name
+	 */
+	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+			sizeof(struct lpfc_name));
+
+	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+			sizeof(struct lpfc_name));
+}
+
+/**
  * lpfc_config_port_post - Perform lpfc initialization after config port
  * @phba: pointer to lpfc hba data structure.
  *
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	pmb->context1 = NULL;
-
-	if (phba->cfg_soft_wwnn)
-		u64_to_wwn(phba->cfg_soft_wwnn,
-			   vport->fc_sparam.nodeName.u.wwn);
-	if (phba->cfg_soft_wwpn)
-		u64_to_wwn(phba->cfg_soft_wwpn,
-			   vport->fc_sparam.portName.u.wwn);
-	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-	       sizeof (struct lpfc_name));
-	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-	       sizeof (struct lpfc_name));
+	lpfc_update_vport_wwn(vport);
 
 	/* Update the fc_host data structures with new wwn. */
 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
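
A note on the two hunks above: lpfc_update_vport_wwn() consolidates the open-coded block removed from lpfc_config_port_post(), and additionally keeps an already-set fc_nodename/fc_portname (copying it back into the service parameters) unless the name is empty or a soft name forces the update. It leans on the driver's existing u64_to_wwn()/wwn_to_u64() pair to move a name between its 64-bit module-parameter form and the 8-byte wire form. A minimal user-space model of that conversion, assuming the MSB-first packing those helpers use; the example value is invented:

```c
#include <stdint.h>
#include <stdio.h>

/* Model of u64_to_wwn(): emit the 64-bit name MSB first. */
static void u64_to_wwn_model(uint64_t in, uint8_t wwn[8])
{
	for (int i = 0; i < 8; i++)
		wwn[i] = (in >> (56 - 8 * i)) & 0xff;
}

/* Model of wwn_to_u64(): the inverse packing. */
static uint64_t wwn_to_u64_model(const uint8_t wwn[8])
{
	uint64_t out = 0;

	for (int i = 0; i < 8; i++)
		out = (out << 8) | wwn[i];
	return out;
}

int main(void)
{
	uint8_t wwn[8];

	u64_to_wwn_model(0x20000000c9abcdefULL, wwn); /* invented soft WWNN */
	printf("wwn[0]=0x%02x, round-trip=0x%016llx\n", wwn[0],
	       (unsigned long long)wwn_to_u64_model(wwn));
	return 0;
}
```
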
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Clear all pending interrupts */
 	writel(0xffffffff, phba->HAregaddr);
 	readl(phba->HAregaddr); /* flush */
-
 	phba->link_state = LPFC_HBA_ERROR;
 	if (rc != MBX_BUSY)
 		mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	    && descp && descp[0] != '\0')
 		return;
 
-	if (phba->lmt & LMT_10Gb)
+	if (phba->lmt & LMT_16Gb)
+		max_speed = 16;
+	else if (phba->lmt & LMT_10Gb)
 		max_speed = 10;
 	else if (phba->lmt & LMT_8Gb)
 		max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1922 "Fibre Channel Adapter"}; 1952 "Fibre Channel Adapter"};
1923 break; 1953 break;
1924 case PCI_DEVICE_ID_LANCER_FC: 1954 case PCI_DEVICE_ID_LANCER_FC:
1925 oneConnect = 1; 1955 case PCI_DEVICE_ID_LANCER_FC_VF:
1926 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1927 break; 1957 break;
1928 case PCI_DEVICE_ID_LANCER_FCOE: 1958 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1929 oneConnect = 1; 1960 oneConnect = 1;
1930 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1931 break; 1962 break;
1932 default: 1963 default:
1933 m = (typeof(m)){"Unknown", "", ""}; 1964 m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	if (mdp && mdp[0] == '\0')
 		snprintf(mdp, 79,"%s", m.name);
-	/* oneConnect hba requires special processing, they are all initiators
+	/*
+	 * oneConnect hba requires special processing, they are all initiators
 	 * and we put the port number on the end
 	 */
 	if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(io);
 		phba->total_iocbq_bufs--;
 	}
+
 	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
 		vport = lpfc_find_vport_by_vpid(phba,
 				acqe_fip->index - phba->vpi_base);
 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
 	pci_try_set_mwi(pdev);
 	pci_save_state(pdev);
 
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+		pdev->needs_freset = 1;
+
 	return 0;
 
 out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable the number of virtual functions to the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not considered an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	int rc;
+
+	rc = pci_enable_sriov(pdev, nr_vfn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2806 Failed to enable sriov on this device "
+				"with vfn number nr_vf:%d, rc:%d\n",
+				nr_vfn, rc);
+	} else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2807 Successful enable sriov on this device "
+				"with vfn number nr_vf:%d\n", nr_vfn);
+	return rc;
+}
+
+/**
  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -4011,6 +4079,7 @@ static int
 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 {
 	struct lpfc_sli *psli;
+	int rc;
 
 	/*
 	 * Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
 		return -ENOMEM;
 
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2808 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
 	return 0;
 }
 
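Both this SLI3 path and the SLI4 path later in the patch treat a pci_enable_sriov() failure as non-fatal: the configured VF count is logged and zeroed, and the probe continues without SR-IOV. A user-space sketch of that fallback policy, where fake_pci_enable_sriov() is a stand-in for the real PCI core call:

```c
#include <stdio.h>

/* Stand-in for pci_enable_sriov(): 0 on success, negative errno on
 * failure (e.g. the function exposes no SR-IOV capability). */
static int fake_pci_enable_sriov(int nr_vfn)
{
	return nr_vfn <= 4 ? 0 : -22; /* pretend the device caps out at 4 */
}

/* Mirror of the driver flow: request the configured count and, on
 * failure, zero the knob so the rest of probe sees SR-IOV as off. */
static void probe_sriov(int *cfg_sriov_nr_virtfn)
{
	int rc;

	if (*cfg_sriov_nr_virtfn <= 0)
		return;
	rc = fake_pci_enable_sriov(*cfg_sriov_nr_virtfn);
	if (rc) {
		fprintf(stderr, "%d VFs not supported (rc=%d), disabling\n",
			*cfg_sriov_nr_virtfn, rc);
		*cfg_sriov_nr_virtfn = 0; /* non-fatal; probe continues */
	}
}

int main(void)
{
	int vfs = 8;

	probe_sriov(&vfs);
	printf("effective VF count: %d\n", vfs);
	return 0;
}
```
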
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	phba->fcf.redisc_wait.data = (unsigned long)phba;
 
 	/*
+	 * Control structure for handling external multi-buffer mailbox
+	 * command pass-through.
+	 */
+	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+		sizeof(struct lpfc_mbox_ext_buf_ctx));
+	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	/*
 	 * We need to do a READ_CONFIG mailbox command here before
 	 * calling lpfc_get_cfgparam. For VFs this will report the
 	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
 
 	/*
-	 * Initialize dirver internal slow-path work queues
+	 * Initialize driver internal slow-path work queues
 	 */
 
 	/* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Receive queue CQ Event work queue list */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
 
+	/* Initialize extent block lists. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
 	/* Initialize the driver internal SLI layer lists. */
 	lpfc_sli_setup(phba);
 	lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	}
 	/*
 	 * Get sli4 parameters that override parameters from Port capabilities.
-	 * If this call fails it is not a critical error so continue loading.
+	 * If this call fails, it isn't critical unless the SLI4 parameters come
+	 * back in conflict.
 	 */
-	lpfc_get_sli4_parameters(phba, mboxq);
+	rc = lpfc_get_sli4_parameters(phba, mboxq);
+	if (rc) {
+		if (phba->sli4_hba.extents_in_use &&
+		    phba->sli4_hba.rpi_hdrs_in_use) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2999 Unsupported SLI4 Parameters "
+				"Extents and RPI headers enabled.\n");
+			goto out_free_bsmbx;
+		}
+	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	/* Create all the SLI4 queues */
 	rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4350 "1430 Failed to initialize sgl list.\n"); 4460 "1430 Failed to initialize sgl list.\n");
4351 goto out_free_sgl_list; 4461 goto out_free_sgl_list;
4352 } 4462 }
4353
4354 rc = lpfc_sli4_init_rpi_hdrs(phba); 4463 rc = lpfc_sli4_init_rpi_hdrs(phba);
4355 if (rc) { 4464 if (rc) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2759 Failed allocate memory for FCF round "
 				"robin failover bmask\n");
+		rc = -ENOMEM;
 		goto out_remove_rpi_hdrs;
 	}
 
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
+		rc = -ENOMEM;
 		goto out_free_fcf_rr_bmask;
 	}
 
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2573 Failed allocate memory for msi-x "
 				"interrupt vector entries\n");
+		rc = -ENOMEM;
 		goto out_free_fcp_eq_hdl;
 	}
 
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"3020 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
 	return rc;
 
 out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	lpfc_sli4_cq_event_release_all(phba);
 	lpfc_sli4_cq_event_pool_destroy(phba);
 
+	/* Release resource identifiers. */
+	lpfc_sli4_dealloc_resource_identifiers(phba);
+
 	/* Free the bsmbx region. */
 	lpfc_destroy_bootstrap_mbox(phba);
 
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4649 "Unloading driver.\n", __func__); 4781 "Unloading driver.\n", __func__);
4650 goto out_free_iocbq; 4782 goto out_free_iocbq;
4651 } 4783 }
4784 iocbq_entry->sli4_lxritag = NO_XRI;
4652 iocbq_entry->sli4_xritag = NO_XRI; 4785 iocbq_entry->sli4_xritag = NO_XRI;
4653 4786
4654 spin_lock_irq(&phba->hbalock); 4787 spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"2400 lpfc_init_sgl_list els %d.\n",
+				"2400 ELS XRI count %d.\n",
 				els_xri_cnt);
 	/* Initialize and populate the sglq list per host/VF. */
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 	phba->sli4_hba.scsi_xri_max =
 			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 	phba->sli4_hba.scsi_xri_cnt = 0;
-
 	phba->sli4_hba.lpfc_scsi_psb_array =
 			kzalloc((sizeof(struct lpfc_scsi_buf *) *
 			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 			goto out_free_mem;
 		}
 
-		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
-		if (sglq_entry->sli4_xritag == NO_XRI) {
-			kfree(sglq_entry);
-			printk(KERN_ERR "%s: failed to allocate XRI.\n"
-				"Unloading driver.\n", __func__);
-			goto out_free_mem;
-		}
 		sglq_entry->buff_type = GEN_BUFF_TYPE;
 		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
 		if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
 {
 	int rc = 0;
-	int longs;
-	uint16_t rpi_count;
 	struct lpfc_rpi_hdr *rpi_hdr;
 
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
 	/*
-	 * Provision an rpi bitmask range for discovery. The total count
-	 * is the difference between max and base + 1.
+	 * If the SLI4 port supports extents, posting the rpi header isn't
+	 * required. Set the expected maximum count and let the actual value
+	 * get set when extents are fully allocated.
 	 */
-	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
-		phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
-	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
-	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
-					   GFP_KERNEL);
-	if (!phba->sli4_hba.rpi_bmask)
-		return -ENOMEM;
+	if (!phba->sli4_hba.rpi_hdrs_in_use) {
+		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+		return rc;
+	}
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
 
 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
 	if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	struct lpfc_rpi_hdr *rpi_hdr;
 	uint32_t rpi_count;
 
+	/*
+	 * If the SLI4 port supports extents, posting the rpi header isn't
+	 * required. Set the expected maximum count and let the actual value
+	 * get set when extents are fully allocated.
+	 */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return NULL;
+	if (phba->sli4_hba.extents_in_use)
+		return NULL;
+
+	/* The limit on the logical index is just the max_rpi count. */
 	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
 	    phba->sli4_hba.max_cfg_param.max_rpi - 1;
 
 	spin_lock_irq(&phba->hbalock);
-	curr_rpi_range = phba->sli4_hba.next_rpi;
+	/*
+	 * Establish the starting RPI in this header block. The starting
+	 * rpi is normalized to a zero base because the physical rpi is
+	 * port based.
+	 */
+	curr_rpi_range = phba->sli4_hba.next_rpi -
+		phba->sli4_hba.max_cfg_param.rpi_base;
 	spin_unlock_irq(&phba->hbalock);
 
 	/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	else
 		rpi_count = LPFC_RPI_HDR_COUNT;
 
+	if (!rpi_count)
+		return NULL;
 	/*
 	 * First allocate the protocol header region for the port. The
 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
 	rpi_hdr->page_count = 1;
 	spin_lock_irq(&phba->hbalock);
-	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+	/* The rpi_hdr stores the logical index only. */
+	rpi_hdr->start_rpi = curr_rpi_range;
 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
 
 	/*
-	 * The next_rpi stores the next module-64 rpi value to post
-	 * in any subsequent rpi memory region postings.
+	 * The next_rpi stores the next logical module-64 rpi value used
+	 * to post physical rpis in subsequent rpi postings.
 	 */
 	phba->sli4_hba.next_rpi += rpi_count;
 	spin_unlock_irq(&phba->hbalock);
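
The RPI hunks above shift the header bookkeeping from physical to logical indexing: the port hands the driver a physical base (rpi_base), while each header block now records only the zero-based logical start. A tiny model of the arithmetic, with invented numbers:

```c
#include <stdint.h>
#include <stdio.h>

#define RPI_HDR_COUNT 64 /* stand-in for LPFC_RPI_HDR_COUNT */

int main(void)
{
	uint32_t rpi_base = 128; /* hypothetical port-assigned base */
	uint32_t next_rpi = 192; /* next physical rpi to be covered */

	/* start_rpi is normalized to a zero base, as in the patch */
	uint32_t start_rpi = next_rpi - rpi_base;

	printf("header covers logical %u..%u, physical %u..%u\n",
	       start_rpi, start_rpi + RPI_HDR_COUNT - 1,
	       rpi_base + start_rpi,
	       rpi_base + start_rpi + RPI_HDR_COUNT - 1);
	return 0;
}
```
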
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
  **/
 void
 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
 {
 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
 
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		goto exit;
+
 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
 		list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
 		kfree(rpi_hdr->dmabuf);
 		kfree(rpi_hdr);
 	}
-
-	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
-	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+	/* There are no rpis available to the port now. */
+	phba->sli4_hba.next_rpi = 0;
 }
 
 /**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 			/* Final checks. The port status should be clean. */
 			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
 				&reg_data.word0) ||
-				bf_get(lpfc_sliport_status_err, &reg_data)) {
+				(bf_get(lpfc_sliport_status_err, &reg_data) &&
+				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
 				phba->work_status[0] =
 					readl(phba->sli4_hba.u.if_type2.
 					      ERR1regaddr);
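
The widened condition above stops treating every asserted lpfc_sliport_status_err as fatal: when the port also raises the rn bit (read here as "reset needed", inferred from the field name), the error is left to the reset path rather than failing the status check. The changed part of the predicate, reduced to a truth table:

```c
#include <stdbool.h>
#include <stdio.h>

/* New check: only err with rn clear is treated as a hard failure. */
static bool port_check_fails(bool err, bool rn)
{
	return err && !rn;
}

int main(void)
{
	printf("err=0 rn=0 -> %d\n", port_check_fails(false, false)); /* 0 */
	printf("err=1 rn=0 -> %d\n", port_check_fails(true, false));  /* 1 */
	printf("err=1 rn=1 -> %d\n", port_check_fails(true, true));   /* 0 */
	return 0;
}
```
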
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmb;
 	struct lpfc_mbx_read_config *rd_config;
-	uint32_t rc = 0;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct lpfc_mbx_get_func_cfg *get_func_cfg;
+	struct lpfc_rsrc_desc_fcfcoe *desc;
+	uint32_t desc_count;
+	int length, i, rc = 0;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		rc = -EIO;
 	} else {
 		rd_config = &pmb->u.mqe.un.rd_config;
+		phba->sli4_hba.extents_in_use =
+			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
 		phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
 		phba->sli4_hba.max_cfg_param.max_fcfi =
 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
-		phba->sli4_hba.max_cfg_param.fcfi_base =
-			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
 		phba->sli4_hba.max_cfg_param.max_eq =
 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
 		phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 		phba->max_vports = phba->max_vpi;
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"2003 cfg params XRI(B:%d M:%d), "
+				"2003 cfg params Extents? %d "
+				"XRI(B:%d M:%d), "
 				"VPI(B:%d M:%d) "
 				"VFI(B:%d M:%d) "
 				"RPI(B:%d M:%d) "
-				"FCFI(B:%d M:%d)\n",
+				"FCFI(Count:%d)\n",
+				phba->sli4_hba.extents_in_use,
 				phba->sli4_hba.max_cfg_param.xri_base,
 				phba->sli4_hba.max_cfg_param.max_xri,
 				phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 				phba->sli4_hba.max_cfg_param.max_vfi,
 				phba->sli4_hba.max_cfg_param.rpi_base,
 				phba->sli4_hba.max_cfg_param.max_rpi,
-				phba->sli4_hba.max_cfg_param.fcfi_base,
 				phba->sli4_hba.max_cfg_param.max_fcfi);
 	}
-	mempool_free(pmb, phba->mbox_mem_pool);
+
+	if (rc)
+		goto read_cfg_out;
 
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
 	if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		phba->cfg_hba_queue_depth =
 			phba->sli4_hba.max_cfg_param.max_xri -
 			lpfc_sli4_get_els_iocb_cnt(phba);
+
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		goto read_cfg_out;
+
+	/* get the pf# and vf# for SLI4 if_type 2 port */
+	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc || shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+				bf_get(lpfc_mqe_command, &pmb->u.mqe),
+				bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		rc = -EIO;
+		goto read_cfg_out;
+	}
+
+	/* search for the fc_fcoe resource descriptor */
+	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+		desc = (struct lpfc_rsrc_desc_fcfcoe *)
+			&get_func_cfg->func_cfg.desc[i];
+		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+			phba->sli4_hba.iov.pf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+			phba->sli4_hba.iov.vf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+			break;
+		}
+	}
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM)
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+				phba->sli4_hba.iov.vf_number);
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+				LPFC_RSRC_DESC_TYPE_FCFCOE);
+		rc = -EIO;
+	}
+
+read_cfg_out:
+	mempool_free(pmb, phba->mbox_mem_pool);
 	return rc;
 }
 
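The new GET_FUNCTION_CONFIG tail above scans a fixed-size array of resource descriptors for the FC/FCoE entry and lifts the PF/VF numbers out of it. The scan pattern, modeled in plain C with invented type codes:

```c
#include <stdio.h>

#define DESC_MAX_NUM	 2    /* stand-in for LPFC_RSRC_DESC_MAX_NUM */
#define DESC_TYPE_FCFCOE 0x43 /* invented stand-in type code */

struct rsrc_desc {
	int type;
	int pf_number;
	int vf_number;
};

int main(void)
{
	struct rsrc_desc descs[DESC_MAX_NUM] = {
		{ .type = 0x50, .pf_number = 0, .vf_number = 0 },
		{ .type = DESC_TYPE_FCFCOE, .pf_number = 1, .vf_number = 3 },
	};
	int i;

	/* Same shape as the driver loop: stop at the first matching entry;
	 * i == DESC_MAX_NUM afterwards means "not found" and is an error. */
	for (i = 0; i < DESC_MAX_NUM; i++)
		if (descs[i].type == DESC_TYPE_FCFCOE)
			break;

	if (i < DESC_MAX_NUM)
		printf("pf:%d vf:%d\n", descs[i].pf_number,
		       descs[i].vf_number);
	else
		printf("descriptor not found\n");
	return 0;
}
```
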
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	phba->sli4_hba.mbx_cq = NULL;
 
 	/* Release FCP response complete queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+	fcp_qidx = 0;
+	do
 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+	while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	kfree(phba->sli4_hba.fcp_cq);
 	phba->sli4_hba.fcp_cq = NULL;
 
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path FCP Response Complete Queue */
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+	fcp_cqidx = 0;
+	do {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0526 Fast-path FCP CQ (%d) not "
 					"allocated\n", fcp_cqidx);
 			goto out_destroy_fcp_cq;
 		}
-		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
-				    phba->sli4_hba.fp_eq[fcp_cqidx],
-				    LPFC_WCQ, LPFC_FCP);
+		if (phba->cfg_fcp_eq_count)
+			rc = lpfc_cq_create(phba,
+					    phba->sli4_hba.fcp_cq[fcp_cqidx],
+					    phba->sli4_hba.fp_eq[fcp_cqidx],
+					    LPFC_WCQ, LPFC_FCP);
+		else
+			rc = lpfc_cq_create(phba,
+					    phba->sli4_hba.fcp_cq[fcp_cqidx],
+					    phba->sli4_hba.sp_eq,
+					    LPFC_WCQ, LPFC_FCP);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		}
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"2588 FCP CQ setup: cq[%d]-id=%d, "
-				"parent eq[%d]-id=%d\n",
+				"parent %seq[%d]-id=%d\n",
 				fcp_cqidx,
 				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+				(phba->cfg_fcp_eq_count) ? "" : "sp_",
 				fcp_cqidx,
-				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
-	}
+				(phba->cfg_fcp_eq_count) ?
+				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+				phba->sli4_hba.sp_eq->queue_id);
+	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
 
 	/*
 	 * Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 				fcp_cq_index,
 				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
 		/* Round robin FCP Work Queue's Completion Queue assignment */
-		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+		if (phba->cfg_fcp_eq_count)
+			fcp_cq_index = ((fcp_cq_index + 1) %
+					phba->cfg_fcp_eq_count);
 	}
 
 	/*
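This hunk, together with the for-to-do-while conversions in the queue setup and destroy paths above, handles cfg_fcp_eq_count dropping to 0 (the non-MSI-X case introduced later in this patch): the single FCP CQ must still be processed, bound to the slow-path EQ, and the round-robin modulo must be guarded against a zero divisor. A compact model of both points:

```c
#include <stdio.h>

int main(void)
{
	int cfg_fcp_eq_count = 0; /* non-MSI-X case after this patch */
	int fcp_cqidx = 0, iterations = 0;

	/* A for-loop would run zero times; the do-while still processes
	 * the one FCP CQ, which gets attached to the slow-path EQ. */
	do {
		iterations++;
	} while (++fcp_cqidx < cfg_fcp_eq_count);
	printf("CQ setup iterations: %d\n", iterations); /* prints 1 */

	/* The round-robin step must be guarded: "% 0" is undefined. */
	int fcp_cq_index = 0;
	if (cfg_fcp_eq_count)
		fcp_cq_index = (fcp_cq_index + 1) % cfg_fcp_eq_count;
	printf("next cq index: %d\n", fcp_cq_index); /* stays 0 */
	return 0;
}
```
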
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 			if (rdy_chk < 1000)
 				break;
 		}
+		/* delay driver action following IF_TYPE_2 function reset */
+		msleep(100);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
 	/*
 	 * Assign MSI-X vectors to interrupt handlers
 	 */
-
-	/* The first vector must associated to slow-path handler for MQ */
-	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
-			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
-			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (vectors > 1)
+		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	else
+		/* All Interrupts need to be handled by one EQ */
+		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+				 &lpfc_sli4_intr_handler, IRQF_SHARED,
+				 LPFC_DRIVER_NAME, phba);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 {
 	int wait_cnt = 0;
 	LPFC_MBOXQ_t *mboxq;
+	struct pci_dev *pdev = phba->pcidev;
 
 	lpfc_stop_hba_timers(phba);
 	phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 	/* Disable PCI subsystem interrupt */
 	lpfc_sli4_disable_intr(phba);
 
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
 
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
 	return rc;
 }
 
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	int length;
 	struct lpfc_sli4_parameters *mbx_sli4_parameters;
 
+	/*
+	 * By default, the driver assumes the SLI4 port requires RPI
+	 * header postings. The SLI4_PARAM response will correct this
+	 * assumption.
+	 */
+	phba->sli4_hba.rpi_hdrs_in_use = 1;
+
 	/* Read the port's SLI4 Config Parameters */
 	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 					     mbx_sli4_parameters);
 	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
 					   mbx_sli4_parameters);
+	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
 	return 0;
 }
 
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 
 	lpfc_debugfs_terminate(vport);
 
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
 	/* Disable interrupt */
 	lpfc_sli_disable_intr(phba);
 
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+	char fwrev[32];
+	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+	struct list_head dma_buffer_list;
+	int i, rc = 0;
+	struct lpfc_dmabuf *dmabuf, *next;
+	uint32_t offset = 0, temp_offset = 0;
+
+	INIT_LIST_HEAD(&dma_buffer_list);
+	if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+	    (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+	    (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+	    (image->size != fw->size)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3022 Invalid FW image found. "
+				"Magic:%d Type:%x ID:%x\n",
+				image->magic_number,
+				bf_get(lpfc_grp_hdr_file_type, image),
+				bf_get(lpfc_grp_hdr_id, image));
+		return -EINVAL;
+	}
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3023 Updating Firmware. Current Version:%s "
+				"New Version:%s\n",
+				fwrev, image->rev_name);
+		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+					 GFP_KERNEL);
+			if (!dmabuf) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+							  SLI4_PAGE_SIZE,
+							  &dmabuf->phys,
+							  GFP_KERNEL);
+			if (!dmabuf->virt) {
+				kfree(dmabuf);
+				rc = -ENOMEM;
+				goto out;
+			}
+			list_add_tail(&dmabuf->list, &dma_buffer_list);
+		}
+		while (offset < fw->size) {
+			temp_offset = offset;
+			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (offset + SLI4_PAGE_SIZE > fw->size) {
+					temp_offset += fw->size - offset;
+					memcpy(dmabuf->virt,
+					       fw->data + temp_offset,
+					       fw->size - offset);
+					break;
+				}
+				temp_offset += SLI4_PAGE_SIZE;
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+				       SLI4_PAGE_SIZE);
+			}
+			rc = lpfc_wr_object(phba, &dma_buffer_list,
+					    (fw->size - offset), &offset);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3024 Firmware update failed. "
+						"%d\n", rc);
+				goto out;
+			}
+		}
+		rc = offset;
+	}
+out:
+	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+		list_del(&dmabuf->list);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	return rc;
+}
+
+/**
  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
  * @pdev: pointer to PCI device
  * @pid: pointer to PCI device identifier
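
lpfc_write_firmware() stages the image through a small pool of 4KB DMA buffers, handing each filled batch to lpfc_wr_object(), which advances the offset. A simplified user-space model of that staging intent follows; it is deliberately not a line-for-line transcription of the loop above, and the sizes and image are toy values:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4 /* toy stand-in for SLI4_PAGE_SIZE */
#define MAX_BDE 2 /* toy stand-in for LPFC_MBX_WR_CONFIG_MAX_BDE */

int main(void)
{
	const uint8_t fw[10] = "ABCDEFGHIJ"; /* invented image */
	uint8_t page[MAX_BDE][PAGE_SZ];
	uint32_t offset = 0, fw_size = sizeof(fw);

	while (offset < fw_size) {
		uint32_t temp = offset;

		/* Fill up to MAX_BDE pages, short-copying the tail. */
		for (int i = 0; i < MAX_BDE && temp < fw_size; i++) {
			uint32_t n = fw_size - temp < PAGE_SZ ?
				     fw_size - temp : PAGE_SZ;
			memcpy(page[i], fw + temp, n);
			temp += n;
		}
		printf("staged bytes %u..%u\n", offset, temp - 1);
		offset = temp; /* in the driver, lpfc_wr_object() advances it */
	}
	return 0;
}
```
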
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	int error;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
+	int adjusted_fcp_eq_count;
+	int fcp_qidx;
+	const struct firmware *fw;
+	uint8_t file_name[16];
 
 	/* Allocate memory for HBA structure */
 	phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		error = -ENODEV;
 		goto out_free_sysfs_attr;
 	}
-	/* Default to single FCP EQ for non-MSI-X */
+	/* Default to single EQ for non-MSI-X */
 	if (phba->intr_type != MSIX)
-		phba->cfg_fcp_eq_count = 1;
-	else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
-		phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+		adjusted_fcp_eq_count = 0;
+	else if (phba->sli4_hba.msix_vec_nr <
+	    phba->cfg_fcp_eq_count + 1)
+		adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+	else
+		adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+	/* Free unused EQs */
+	for (fcp_qidx = adjusted_fcp_eq_count;
+	     fcp_qidx < phba->cfg_fcp_eq_count;
+	     fcp_qidx++) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+		/* do not delete the first fcp_cq */
+		if (fcp_qidx)
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+	}
+	phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
 	/* Set up SLI-4 HBA */
 	if (lpfc_sli4_hba_setup(phba)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
+	/* check for firmware upgrade or downgrade */
+	snprintf(file_name, 16, "%s.grp", phba->ModelName);
+	error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+	if (!error) {
+		lpfc_write_firmware(phba, fw);
+		release_firmware(fw);
+	}
+
 	/* Check if there are static vports to be created. */
 	lpfc_create_static_vport(phba);
 
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
9503 9886