Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  213
1 file changed, 135 insertions(+), 78 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 774663e8e1fe..cd9697edf860 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_cmd_len = 16;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		shost->dma_boundary =
-			phba->sli4_hba.pc_sli4_params.sge_supp_len;
+			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
 	}
 
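The fix above reflects that shost->dma_boundary is a mask, not a length: the SCSI midlayer will not let a single scatter/gather segment cross (boundary + 1) bytes, so a supported SGE length of N maps to a boundary of N - 1. A minimal standalone sketch of that convention, using an illustrative 64KB length rather than a value a real port reported:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative value; the driver takes the real length from
		 * the port's reported pc_sli4_params.sge_supp_len. */
		unsigned int sge_supp_len = 0x10000;          /* 64KB length */
		unsigned int dma_boundary = sge_supp_len - 1; /* 0xffff mask */

		printf("supported SGE length: 0x%x\n", sge_supp_len);
		printf("dma_boundary mask:    0x%x\n", dma_boundary);
		return 0;
	}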
@@ -2600,15 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
-	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
-		phba->menlo_flag |= HBA_MENLO_SUPPORT;
-		/* check for menlo minimum sg count */
-		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
-			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
-			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
-		}
-	}
-
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
@@ -3236,12 +3227,26 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
 
 	if (!vport)
 		return NULL;
-	ndlp = lpfc_findnode_did(vport, Fabric_DID);
-	if (!ndlp)
-		return NULL;
 	phba = vport->phba;
 	if (!phba)
 		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 0;
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 0;
+	}
 	if (phba->pport->port_state <= LPFC_FLOGI)
 		return NULL;
 	/* If virtual link is not yet instantiated ignore CVL */
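Two things change in the hunk above: vport->phba is now validated before any path that needs it, and a missing Fabric ndlp no longer aborts CVL handling, since the node is allocated on demand (or an inactive one re-enabled in place). A self-contained sketch of that find-or-create pattern, with hypothetical names standing in for the driver's node-list helpers:

	#include <stdlib.h>

	struct node { unsigned int did; int active; };

	#define TABLE_SIZE 16
	static struct node *table[TABLE_SIZE];

	/* Stand-in for lpfc_findnode_did(): NULL when no node exists yet. */
	static struct node *find_node(unsigned int did)
	{
		return table[did % TABLE_SIZE];
	}

	static struct node *find_or_create(unsigned int did)
	{
		struct node *n = find_node(did);

		if (!n) {
			/* No existing node, so allocate a new one. */
			n = calloc(1, sizeof(*n));
			if (!n)
				return NULL;
			n->did = did;
			table[did % TABLE_SIZE] = n;
		}
		/* Whether found or fresh, make sure the node is usable. */
		n->active = 1;
		return n;
	}

	int main(void)
	{
		return find_or_create(0xfffffe) ? 0 : 1; /* Fabric-like DID */
	}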
@@ -3304,11 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
-			"2546 New FCF found/FCF parameter modified event: "
-			"evt_tag:x%x, fcf_index:x%x\n",
-			acqe_fcoe->event_tag, acqe_fcoe->index);
-
+		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2546 New FCF found event: "
+					"evt_tag:x%x, fcf_index:x%x\n",
+					acqe_fcoe->event_tag,
+					acqe_fcoe->index);
+		else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+					LOG_DISCOVERY,
+					"2788 FCF parameter modified event: "
+					"evt_tag:x%x, fcf_index:x%x\n",
+					acqe_fcoe->event_tag,
+					acqe_fcoe->index);
 		spin_lock_irq(&phba->hbalock);
 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
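Splitting the shared message lets the two causes carry distinct severities and message IDs (2546 vs. 2788), so a routine parameter modification no longer logs at error level. A hedged sketch of the same split, with fprintf() standing in for lpfc_printf_log():

	#include <stdio.h>

	enum fcoe_evt { EVT_NEW_FCF, EVT_FCF_PARAM_MOD };

	static void log_fcf_event(enum fcoe_evt type, unsigned int tag,
				  unsigned int index)
	{
		if (type == EVT_NEW_FCF)
			fprintf(stderr, "ERR: 2546 New FCF found event: "
				"evt_tag:x%x, fcf_index:x%x\n", tag, index);
		else
			fprintf(stderr, "WARN: 2788 FCF parameter modified "
				"event: evt_tag:x%x, fcf_index:x%x\n",
				tag, index);
	}

	int main(void)
	{
		log_fcf_event(EVT_NEW_FCF, 0x1, 0x2);
		log_fcf_event(EVT_FCF_PARAM_MOD, 0x3, 0x4);
		return 0;
	}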
@@ -3517,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change. The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_grp5 *acqe_grp5)
+{
+	uint16_t prev_ll_spd;
+
+	phba->fc_eventTag = acqe_grp5->event_tag;
+	phba->fcoe_eventtag = acqe_grp5->event_tag;
+	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+	phba->sli4_hba.link_state.logical_speed =
+		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2789 GRP5 Async Event: Updating logical link speed "
+			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+			(phba->sli4_hba.link_state.logical_speed * 10));
+}
+
+/**
  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
  * @phba: pointer to lpfc hba data structure.
  *
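As the new handler's kernel-doc notes, the port reports logical link speed in units of 10Mbps, which is why both values in the 2789 message are multiplied by 10 before printing. A trivial standalone illustration of the conversion (the raw readings are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned short prev_raw = 100;  /* made-up previous reading */
		unsigned short new_raw = 1000;  /* made-up value from the ACQE */

		printf("Updating logical link speed from %dMbps to %dMbps\n",
		       prev_raw * 10, new_raw * 10);
		return 0;
	}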
@@ -3552,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
 			lpfc_sli4_async_dcbx_evt(phba,
 						 &cq_event->cqe.acqe_dcbx);
 			break;
+		case LPFC_TRAILER_CODE_GRP5:
+			lpfc_sli4_async_grp5_evt(phba,
+						 &cq_event->cqe.acqe_grp5);
+			break;
 		default:
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"1804 Invalid asynchrous event code: "
@@ -3813,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+	}
+
 	/*
 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
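This is the Menlo (Hornet) block removed from lpfc_create_port() earlier in the patch; running it here, immediately after lpfc_get_cfgparam(), guarantees the minimum segment count is applied before sg_dma_buf_size is derived from cfg_sg_seg_cnt below. A small sketch of the clamp-before-derive ordering, with illustrative names and sizes rather than the driver's:

	#include <stdio.h>

	#define MENLO_MIN_SEG_CNT 128   /* illustrative minimum */

	/* Pretend pool sizing: headers plus one SGE descriptor per segment. */
	static unsigned int sg_dma_buf_size(unsigned int seg_cnt)
	{
		return 96 + 12 + seg_cnt * 16;
	}

	int main(void)
	{
		unsigned int seg_cnt = 64;       /* module parameter, too small */

		if (seg_cnt < MENLO_MIN_SEG_CNT) /* clamp first ...             */
			seg_cnt = MENLO_MIN_SEG_CNT;

		/* ... so every later derivation sees the corrected count. */
		printf("sg_dma_buf_size: %u\n", sg_dma_buf_size(seg_cnt));
		return 0;
	}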
@@ -4030,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+					       GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto out_free_bsmbx;
+	}
+
+	/* Get the Supported Pages. It is always available. */
+	lpfc_supported_pages(mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		goto out_free_bsmbx;
+	}
+
+	mqe = &mboxq->u.mqe;
+	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+	       LPFC_MAX_SUPPORTED_PAGES);
+	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+		switch (pn_page[i]) {
+		case LPFC_SLI4_PARAMETERS:
+			phba->sli4_hba.pc_sli4_params.supported = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Read the port's SLI4 Parameters capabilities if supported. */
+	if (phba->sli4_hba.pc_sli4_params.supported)
+		rc = lpfc_pc_sli4_params_get(phba, mboxq);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (rc) {
+		rc = -EIO;
+		goto out_free_bsmbx;
+	}
 	/* Create all the SLI4 queues */
 	rc = lpfc_sli4_queue_create(phba);
 	if (rc)
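Moving the Supported Pages / SLI4 Parameters mailbox exchange up to this point (the next hunk deletes it from its old, later location) populates pc_sli4_params early in resource setup, so later consumers, including the sge_supp_len-based dma_boundary computation in the first hunk, see real values. A sketch of the read-capabilities-before-use ordering, with invented stand-ins for the mailbox round trip:

	#include <stdio.h>

	struct sli4_params { int supported; unsigned int sge_supp_len; };

	/* Stand-in for the mailbox exchange; values are invented. */
	static int read_port_params(struct sli4_params *p)
	{
		p->supported = 1;
		p->sge_supp_len = 0x10000;
		return 0;
	}

	int main(void)
	{
		struct sli4_params params = { 0 };

		if (read_port_params(&params))  /* capabilities first ...    */
			return 1;
		printf("dma_boundary: 0x%x\n",  /* ... then derive from them */
		       params.sge_supp_len - 1);
		return 0;
	}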
@@ -4090,43 +4178,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_fcp_eq_hdl;
 	}
 
-	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-					       GFP_KERNEL);
-	if (!mboxq) {
-		rc = -ENOMEM;
-		goto out_free_fcp_eq_hdl;
-	}
-
-	/* Get the Supported Pages. It is always available. */
-	lpfc_supported_pages(mboxq);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	if (unlikely(rc)) {
-		rc = -EIO;
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		goto out_free_fcp_eq_hdl;
-	}
-
-	mqe = &mboxq->u.mqe;
-	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
-	       LPFC_MAX_SUPPORTED_PAGES);
-	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
-		switch (pn_page[i]) {
-		case LPFC_SLI4_PARAMETERS:
-			phba->sli4_hba.pc_sli4_params.supported = 1;
-			break;
-		default:
-			break;
-		}
-	}
-
-	/* Read the port's SLI4 Parameters capabilities if supported. */
-	if (phba->sli4_hba.pc_sli4_params.supported)
-		rc = lpfc_pc_sli4_params_get(phba, mboxq);
-	mempool_free(mboxq, phba->mbox_mem_pool);
-	if (rc) {
-		rc = -EIO;
-		goto out_free_fcp_eq_hdl;
-	}
 	return rc;
 
 out_free_fcp_eq_hdl:
@@ -5050,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 
 	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+	phba->mbox_ext = (phba->slim2p.virt +
+		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
 	phba->IOCBs = (phba->slim2p.virt +
 			offsetof(struct lpfc_sli2_slim, IOCBs));
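The new mbox_ext pointer is carved out of the already-mapped SLIM region the same way mbox, pcb, and IOCBs are: base address plus offsetof() into the layout struct. A compact demonstration of the idiom (the struct layout is invented, not lpfc_sli2_slim's):

	#include <stddef.h>
	#include <stdio.h>

	struct slim_layout {            /* invented layout, for shape only */
		char mbx[256];
		char mbx_ext_words[112];
		char pcb[128];
	};

	int main(void)
	{
		static char region[sizeof(struct slim_layout)]; /* SLIM stand-in */
		char *mbox     = region + offsetof(struct slim_layout, mbx);
		char *mbox_ext = region + offsetof(struct slim_layout,
						   mbx_ext_words);

		printf("mbox at +%lu, mbox_ext at +%lu\n",
		       (unsigned long)(mbox - region),
		       (unsigned long)(mbox_ext - region));
		return 0;
	}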
@@ -7753,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is called to prepare the SLI3 device for PCI slot recover. It
- * aborts and stops all the on-going I/Os on the pci device.
+ * aborts all the outstanding SCSI I/Os to the pci device.
  **/
 static void
 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
 {
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2723 PCI channel I/O abort preparing for recovery\n");
-	/* Prepare for bringing HBA offline */
-	lpfc_offline_prep(phba);
-	/* Clear sli active flag to prevent sysfs access to HBA */
-	spin_lock_irq(&phba->hbalock);
-	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
-	spin_unlock_irq(&phba->hbalock);
-	/* Stop and flush all I/Os and bring HBA offline */
-	lpfc_offline(phba);
+
+	/*
+	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
+	 * and let the SCSI mid-layer retry them to recover.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
 }
 
 /**
@@ -7781,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
 static void
 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring;
-
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2710 PCI channel disable preparing for reset\n");
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
 	/* Disable interrupt and pci device */
 	lpfc_sli_disable_intr(phba);
 	pci_disable_device(phba->pcidev);
-	/*
-	 * There may be I/Os dropped by the firmware.
-	 * Error iocb (I/O) on txcmplq and let the SCSI layer
-	 * retry it after re-establishing link.
-	 */
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
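Together with the recovery and permanent-failure hunks around it, this settles on one teardown order: block new SCSI I/O, stop the HBA timers, disable the interrupt and PCI device, then flush whatever was left in flight. A schematic sketch of that quiesce-then-flush sequence, using placeholder functions in place of the driver's real helpers:

	#include <stdio.h>

	static void block_io(void)    { puts("block new SCSI I/O"); }
	static void stop_timers(void) { puts("stop HBA timers"); }
	static void disable_hw(void)  { puts("disable interrupt + PCI device"); }
	static void flush_io(void)    { puts("flush outstanding FCP I/O"); }

	int main(void)
	{
		block_io();     /* nothing new can be queued        */
		stop_timers();  /* no timer can touch dead hardware */
		disable_hw();   /* safe: everything is quiesced     */
		flush_io();     /* fail back what was in flight     */
		return 0;
	}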
@@ -7811,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 {
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2711 PCI channel permanent disable for failure\n");
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
 	/* Clean up all driver's outstanding SCSI I/Os */
 	lpfc_sli_flush_fcp_rings(phba);
 }
@@ -7839,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	/* Block all SCSI devices' I/Os on the host */
-	lpfc_scsi_dev_block(phba);
-
 	switch (state) {
 	case pci_channel_io_normal:
 		/* Non-fatal error, prepare for recovery */
@@ -7948,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	/* Bring the device online */
+	/* Bring device online, it will be a no-op for non-fatal error resume */
 	lpfc_online(phba);
 
 	/* Clean up Advanced Error Reporting (AER) if needed */