author		Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
commit		4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree		0dbf78c95456a0b02d07fcd473281f04a87e266d /drivers/scsi/lpfc
parent		c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent		90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge branch 'master' into for-next
Conflicts:
MAINTAINERS
arch/arm/mach-omap2/pm24xx.c
drivers/scsi/bfa/bfa_fcpim.c
Needed to update to apply fixes for which the old branch was too
outdated.
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--	drivers/scsi/lpfc/lpfc.h		|  12
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c		|   7
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.c		|   4
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h		|   4
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c		| 439
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c	| 437
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw.h		|  41
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw4.h		| 167
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c		| 102
-rw-r--r--	drivers/scsi/lpfc/lpfc_mbox.c		|  28
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c		|  13
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c		| 458
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h		|   9
-rw-r--r--	drivers/scsi/lpfc/lpfc_version.h	|   2
14 files changed, 1225 insertions(+), 498 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a50aa03b8ac1..196de40b906c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -202,9 +202,12 @@ struct lpfc_stats {
 	uint32_t elsRcvPRLO;
 	uint32_t elsRcvPRLI;
 	uint32_t elsRcvLIRR;
+	uint32_t elsRcvRLS;
 	uint32_t elsRcvRPS;
 	uint32_t elsRcvRPL;
 	uint32_t elsRcvRRQ;
+	uint32_t elsRcvRTV;
+	uint32_t elsRcvECHO;
 	uint32_t elsXmitFLOGI;
 	uint32_t elsXmitFDISC;
 	uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
-#define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
-#define HBA_FIP_SUPPORT		0x400 /* FIP support in HBA */
-#define HBA_AER_ENABLED		0x800 /* AER enabled with HBA */
+#define FCF_TS_INPROG		0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG		0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO		0x2000 /* HBA in devloss timeout */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
@@ -573,6 +578,7 @@ struct lpfc_hba {
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;		/* preferred D_ID */
 	uint8_t  fc_pref_ALPA;		/* preferred AL_PA */
+	uint32_t fc_edtovResol;		/* E_D_TOV timer resolution */
 	uint32_t fc_edtov;		/* E_D_TOV timer value */
 	uint32_t fc_arbtov;		/* ARB_TOV timer value */
 	uint32_t fc_ratov;		/* R_A_TOV timer value */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 0e4abb96d68e..2d7b01a2bb2b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
 		break;
 	case MBX_SECURITY_MGMT:
 	case MBX_AUTH_PORT:
-		if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+			printk(KERN_WARNING "mbox_read:Command 0x%x "
+			       "is not permitted\n", pmb->mbxCommand);
+			sysfs_mbox_idle(phba);
+			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
+		}
 		break;
 	case MBX_READ_SPARM64:
 	case MBX_READ_LA:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d60b55f53a..7260c3af555a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 	job = menlo->set_job;
 	job->dd_data = NULL; /* so timeout handler does not reply */
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock(&phba->hbalock);
 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
 	if (cmdiocbq->context2 && rspiocbq)
 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
 		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock(&phba->hbalock);
 
 	bmp = menlo->bmp;
 	rspiocbq = menlo->rspiocbq;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc18572..a5f5a093a8a4 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
 			struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e6ca12f6c6cb..884f4d321799 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		(elscmd == ELS_CMD_LOGO)))
 		switch (elscmd) {
 		case ELS_CMD_FLOGI:
-		elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
 				& LPFC_FIP_ELS_ID_MASK);
 		break;
 		case ELS_CMD_FDISC:
-		elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
 				& LPFC_FIP_ELS_ID_MASK);
 		break;
 		case ELS_CMD_LOGO:
-		elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
 				& LPFC_FIP_ELS_ID_MASK);
 		break;
 		}
@@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
 		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 
+	phba->fc_edtovResol = sp->cmn.edtovResolution;
 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PUBLIC_LOOP;
 		spin_unlock_irq(shost->host_lock);
-	} else {
-		/*
-		 * If we are a N-port connected to a Fabric, fixup sparam's so
-		 * logins to devices on remote loops work.
-		 */
-		vport->fc_sparam.cmn.altBbCredit = 1;
 	}
 
 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_unreg_rpi(vport, np);
 		}
 		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 			lpfc_mbx_unreg_vpi(vport);
 			spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	if (irsp->ulpStatus) {
 		/*
-		 * In case of FIP mode, perform round robin FCF failover
+		 * In case of FIP mode, perform roundrobin FCF failover
 		 * due to new FCF discovery
 		 */
 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		    (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
 		    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
-					"2611 FLOGI failed on registered "
-					"FCF record fcf_index(%d), status: "
-					"x%x/x%x, tmo:x%x, trying to perform "
-					"round robin failover\n",
+					"2611 FLOGI failed on FCF (x%x), "
+					"status:x%x/x%x, tmo:x%x, perform "
+					"roundrobin FCF failover\n",
 					phba->fcf.current_rec.fcf_indx,
 					irsp->ulpStatus, irsp->un.ulpWord[4],
 					irsp->ulpTimeout);
 			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
-			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
-				/*
-				 * Exhausted the eligible FCF record list,
-				 * fail through to retry FLOGI on current
-				 * FCF record.
-				 */
-				lpfc_printf_log(phba, KERN_WARNING,
-						LOG_FIP | LOG_ELS,
-						"2760 Completed one round "
-						"of FLOGI FCF round robin "
-						"failover list, retry FLOGI "
-						"on currently registered "
-						"FCF index:%d\n",
-						phba->fcf.current_rec.fcf_indx);
-			} else {
-				lpfc_printf_log(phba, KERN_INFO,
-						LOG_FIP | LOG_ELS,
-						"2794 FLOGI FCF round robin "
-						"failover to FCF index x%x\n",
-						fcf_index);
-				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
-						fcf_index);
-				if (rc)
-					lpfc_printf_log(phba, KERN_WARNING,
-							LOG_FIP | LOG_ELS,
-							"2761 FLOGI round "
-							"robin FCF failover "
-							"read FCF failed "
-							"rc:x%x, fcf_index:"
-							"%d\n", rc,
-							phba->fcf.current_rec.fcf_indx);
-				else
-					goto out;
-			}
+			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+			if (rc)
+				goto out;
 		}
 
 		/* FLOGI failure */
@@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			lpfc_nlp_put(ndlp);
 			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
@@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			if (phba->hba_flag & HBA_FIP_SUPPORT)
 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
 						LOG_ELS,
-						"2769 FLOGI successful on FCF "
-						"record: current_fcf_index:"
-						"x%x, terminate FCF round "
-						"robin failover process\n",
+						"2769 FLOGI to FCF (x%x) "
+						"completed successfully\n",
 						phba->fcf.current_rec.fcf_indx);
 			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
@@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
 		return 0;
 	}
 
-	if (lpfc_issue_els_flogi(vport, ndlp, 0))
+	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
 		/* This decrement of reference count to node shall kick off
 		 * the release of the node.
 		 */
 		lpfc_nlp_put(ndlp);
-
+		return 0;
+	}
 	return 1;
 }
 
@@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 	sp = (struct serv_parm *) pcmd;
 
+	/*
+	 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
+	 * to device on remote loops work.
+	 */
+	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+		sp->cmn.altBbCredit = 1;
+
 	if (sp->cmn.fcphLow < FC_PH_4_3)
 		sp->cmn.fcphLow = FC_PH_4_3;
 
@@ -3926,6 +3904,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 }
 
 /**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;	/* Xri */
+	/* Xmit ECHO ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			      "Issue ACC ECHO: did:x%x flg:x%x",
+			      ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	lpfc_nlp_put(ndlp);
+	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
+				    * it could be freed */
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
  * @vport: pointer to a host virtual N_Port data structure.
 *
@@ -4684,6 +4720,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	uint8_t *pcmd;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+	/* skip over first word of echo command to find echo data */
+	pcmd += sizeof(uint32_t);
+
+	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+	return 0;
+}
+
+/**
  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -4735,6 +4795,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	struct RLS_RSP *rls_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t xri;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	xri = (uint16_t) ((unsigned long)(pmb->context1));
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb)
+		return;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = xri;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rls_rsp = (struct RLS_RSP *)pcmd;
+
+	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 }
 
 /**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port Status (RPL) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
+ * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. It is for the callback function,
+ * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
+ * to actually sending out RPL Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *pcmd;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RPS request and done with it */
+		goto reject_out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+	if (mbox) {
+		lpfc_read_lnk_stat(phba, mbox);
+		mbox->context1 =
+			(void *)((unsigned long) cmdiocb->iocb.ulpContext);
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			!= MBX_NOT_FINISHED)
+			/* Mbox completion will send ELS Response */
+			return 0;
+		/* Decrement reference count used for the failed mbox
+		 * command.
+		 */
+		lpfc_nlp_put(ndlp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Timout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
+ * Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct ls_rjt stat;
+	struct RTV_RSP *rtv_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint32_t cmdsize;
+
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RPS request and done with it */
+		goto reject_out;
+
+	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+
+	/* use the command's xri in the response */
+	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+	rtv_rsp = (struct RTV_RSP *)pcmd;
+
+	/* populate RTV payload */
+	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+			 "Data: x%x x%x x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi,
+			 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return 0;
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
  * @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 	rpl = (RPL *) (lp + 1);
-
 	maxsize = be32_to_cpu(rpl->maxsize);
 
 	/* We support only one port */
@@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_RLS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RLS: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRLS++;
+		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	case ELS_CMD_RPS:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV RPS: did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_RTV:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RTV: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+		phba->fc_stat.elsRcvRTV++;
+		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	case ELS_CMD_RRQ:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV RRQ: did:x%x/ste:x%x flg:x%x",
5871 | "RCV RRQ: did:x%x/ste:x%x flg:x%x", | 6180 | "RCV RRQ: did:x%x/ste:x%x flg:x%x", |
@@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
5876 | if (newnode) | 6185 | if (newnode) |
5877 | lpfc_nlp_put(ndlp); | 6186 | lpfc_nlp_put(ndlp); |
5878 | break; | 6187 | break; |
6188 | case ELS_CMD_ECHO: | ||
6189 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | ||
6190 | "RCV ECHO: did:x%x/ste:x%x flg:x%x", | ||
6191 | did, vport->port_state, ndlp->nlp_flag); | ||
6192 | |||
6193 | phba->fc_stat.elsRcvECHO++; | ||
6194 | lpfc_els_rcv_echo(vport, elsiocb, ndlp); | ||
6195 | if (newnode) | ||
6196 | lpfc_nlp_put(ndlp); | ||
6197 | break; | ||
5879 | default: | 6198 | default: |
5880 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 6199 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
5881 | "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", | 6200 | "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", |
@@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 		default:
 			/* Try to recover from this error */
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vport);
 			lpfc_mbx_unreg_vpi(vport);
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			lpfc_unreg_rpi(vport, np);
 		}
 		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 * to update the MAC address.
 		 */
 		lpfc_register_new_vport(phba, vport, ndlp);
-		return ;
+		goto out;
 	}
 
 	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2a6866e63aae..0873fbd19e18 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
  *******************************************************************/
 
 #include <linux/blkdev.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 		return;
 	}
 
-/*
- * This function is called from the worker thread when dev_loss_tmo
- * expire.
- */
-static void
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when devloss timeout timer
+ * expires. For SLI4 host, this routine shall return 1 when at lease one
+ * remote node, including this @ndlp, is still in use of FCF; otherwise, this
+ * routine shall return 0 when there is no remote node is still in use of FCF
+ * when devloss timeout happened to this @ndlp.
+ **/
+static int
 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	int put_node;
 	int put_rport;
 	int warn_on = 0;
+	int fcf_inuse = 0;
 
 	rport = ndlp->rport;
 
 	if (!rport)
-		return;
+		return fcf_inuse;
 
 	rdata = rport->dd_data;
 	name = (uint8_t *) &ndlp->nlp_portname;
 	vport = ndlp->vport;
 	phba = vport->phba;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		fcf_inuse = lpfc_fcf_inuse(phba);
+
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 		"rport devlosstmo:did:x%x type:x%x id:x%x",
 		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
 			put_device(&rport->dev);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 				 *name, *(name+1), *(name+2), *(name+3),
 				 *(name+4), *(name+5), *(name+6), *(name+7),
 				 ndlp->nlp_DID);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
 			put_device(&rport->dev);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 
+	return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifer with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for SLI4 host. For the devloss
+ * timeout of the last remote node which had been in use of FCF, when this
+ * routine is invoked, it shall be guaranteed that none of the remote are
+ * in-use of FCF. When devloss timeout to the last remote using the FCF,
+ * if the FIP engine is neither in FCF table scan process nor roundrobin
+ * failover process, the in-use FCF shall be unregistered. If the FIP
+ * engine is in FCF discovery process, the devloss timeout state shall
+ * be set for either the FCF table scan process or roundrobin failover
+ * process to unregister the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+				    uint32_t nlp_did)
+{
+	/* If devloss timeout happened to a remote node when FCF had no
+	 * longer been in-use, do nothing.
+	 */
+	if (!fcf_inuse)
+		return;
+
+	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			phba->hba_flag |= HBA_DEVLOSS_TMO;
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2847 Last remote node (x%x) using "
+					"FCF devloss tmo\n", nlp_did);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2868 Devloss tmo to FCF rediscovery "
+					"in progress\n");
+			return;
+		}
+		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2869 Devloss tmo to idle FIP engine, "
+					"unreg in-use FCF and rescan.\n");
+			/* Unregister in-use FCF and rescan */
+			lpfc_unregister_fcf_rescan(phba);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2870 FCF table scan in progress\n");
+		if (phba->hba_flag & FCF_RR_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2871 FLOGI roundrobin FCF failover "
+					"in progress\n");
+	}
 	lpfc_unregister_unused_fcf(phba);
 }
 
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 	struct lpfc_work_evt *evtp = NULL;
 	struct lpfc_nodelist *ndlp;
 	int free_evt;
+	int fcf_inuse;
+	uint32_t nlp_did;
 
 	spin_lock_irq(&phba->hbalock);
 	while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 			break;
 		case LPFC_EVT_DEV_LOSS:
 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-			lpfc_dev_loss_tmo_handler(ndlp);
+			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
 			free_evt = 0;
 			/* decrement the node reference count held for
 			 * this queued work
 			 */
+			nlp_did = ndlp->nlp_DID;
 			lpfc_nlp_put(ndlp);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_post_dev_loss_tmo_handler(phba,
+								    fcf_inuse,
+								    nlp_did);
 			break;
 		case LPFC_EVT_ONLINE:
 			if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 					    : NLP_EVT_DEVICE_RECOVERY);
 	}
 	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 				"2017 REG_FCFI mbxStatus error x%x "
 				"HBA state x%x\n",
 				mboxq->u.mb.mbxStatus, vport->port_state);
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		return;
+		goto fail_out;
 	}
 
 	/* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag |= FCF_REGISTERED;
 	spin_unlock_irq(&phba->hbalock);
+
 	/* If there is a pending FCoE event, restart FCF table scan. */
-	if (lpfc_check_pending_fcoe_event(phba, 1)) {
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		return;
-	}
+	if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+		goto fail_out;
+
+	/* Mark successful completion of FCF table scan */
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-	spin_unlock_irq(&phba->hbalock);
-	if (vport->port_state != LPFC_FLOGI)
+	phba->hba_flag &= ~FCF_TS_INPROG;
+	if (vport->port_state != LPFC_FLOGI) {
+		phba->hba_flag |= FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_initial_flogi(vport);
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto out;
 
+fail_out:
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCF_RR_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+out:
 	mempool_free(mboxq, phba->mbox_mem_pool);
-	return;
 }
 
 /**
@@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 	int rc;
 
 	spin_lock_irq(&phba->hbalock);
-
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 	/* The FCF is already registered, start discovery */
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
 		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		spin_unlock_irq(&phba->hbalock);
-		if (phba->pport->port_state != LPFC_FLOGI)
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		if (phba->pport->port_state != LPFC_FLOGI) {
+			phba->hba_flag |= FCF_RR_INPROG;
+			spin_unlock_irq(&phba->hbalock);
 			lpfc_initial_flogi(phba->pport);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
 	spin_unlock_irq(&phba->hbalock);
 
-	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
-				 GFP_KERNEL);
+	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!fcf_mbxq) {
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
 	}
@@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 	 * FCF discovery, no need to restart FCF discovery.
 	 */
 	if ((phba->link_state >= LPFC_LINK_UP) &&
-		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
 		return 0;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	} else {
 		/*
-		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * Do not continue FCF discovery and clear FCF_TS_INPROG
 		 * flag
 		 */
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2833 Stop FCF discovery process due to link "
 				"state change (x%x)\n", phba->link_state);
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
 		spin_unlock_irq(&phba->hbalock);
 	}
@@ -1729,6 +1829,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, | |||
1729 | } | 1829 | } |
1730 | 1830 | ||
1731 | /** | 1831 | /** |
1832 | * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf | ||
1833 | * @vport: Pointer to vport object. | ||
1834 | * @fcf_index: index to next fcf. | ||
1835 | * | ||
1836 | * This function processes the roundrobin fcf failover to the next fcf index. | ||
1837 | * When this function is invoked, there will be a current fcf registered | ||
1838 | * for flogi. | ||
1839 | * Return: 0 to continue retrying flogi on the currently registered fcf; | ||
1840 | * 1 to stop flogi on the currently registered fcf. | ||
1841 | */ | ||
1842 | int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) | ||
1843 | { | ||
1844 | struct lpfc_hba *phba = vport->phba; | ||
1845 | int rc; | ||
1846 | |||
1847 | if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { | ||
1848 | spin_lock_irq(&phba->hbalock); | ||
1849 | if (phba->hba_flag & HBA_DEVLOSS_TMO) { | ||
1850 | spin_unlock_irq(&phba->hbalock); | ||
1851 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
1852 | "2872 Devloss tmo with no eligible " | ||
1853 | "FCF, unregister in-use FCF (x%x) " | ||
1854 | "and rescan FCF table\n", | ||
1855 | phba->fcf.current_rec.fcf_indx); | ||
1856 | lpfc_unregister_fcf_rescan(phba); | ||
1857 | goto stop_flogi_current_fcf; | ||
1858 | } | ||
1859 | /* Mark the end to FLOGI roundrobin failover */ | ||
1860 | phba->hba_flag &= ~FCF_RR_INPROG; | ||
1861 | /* Allow action to new fcf asynchronous event */ | ||
1862 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); | ||
1863 | spin_unlock_irq(&phba->hbalock); | ||
1864 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
1865 | "2865 No FCF available, stop roundrobin FCF " | ||
1866 | "failover and change port state:x%x/x%x\n", | ||
1867 | phba->pport->port_state, LPFC_VPORT_UNKNOWN); | ||
1868 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | ||
1869 | goto stop_flogi_current_fcf; | ||
1870 | } else { | ||
1871 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, | ||
1872 | "2794 Try FLOGI roundrobin FCF failover to " | ||
1873 | "(x%x)\n", fcf_index); | ||
1874 | rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); | ||
1875 | if (rc) | ||
1876 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, | ||
1877 | "2761 FLOGI roundrobin FCF failover " | ||
1878 | "failed (rc:x%x) to read FCF (x%x)\n", | ||
1879 | rc, phba->fcf.current_rec.fcf_indx); | ||
1880 | else | ||
1881 | goto stop_flogi_current_fcf; | ||
1882 | } | ||
1883 | return 0; | ||
1884 | |||
1885 | stop_flogi_current_fcf: | ||
1886 | lpfc_can_disctmo(vport); | ||
1887 | return 1; | ||
1888 | } | ||
1889 | |||
1890 | /** | ||
1732 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. | 1891 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. |
1733 | * @phba: pointer to lpfc hba data structure. | 1892 | * @phba: pointer to lpfc hba data structure. |
1734 | * @mboxq: pointer to mailbox object. | 1893 | * @mboxq: pointer to mailbox object. |
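The lpfc_sli4_fcf_rr_next_proc() helper added above is driven from the FLOGI completion path in lpfc_els.c, which this commit also touches but which is not shown in this hunk. A minimal sketch of such a caller, using the existing lpfc_sli4_fcf_rr_next_index_get() helper and the documented return semantics, is given below; the real lpfc_els.c logic handles retry counting and discovery state in more detail, so treat this only as an illustration.

	/* Sketch only: react to a FLOGI failure while roundrobin FCF
	 * failover is in progress.  Details of the real lpfc_els.c caller
	 * (retry counting, discovery state handling) are omitted.
	 */
	static void lpfc_flogi_fail_rr_sketch(struct lpfc_vport *vport)
	{
		struct lpfc_hba *phba = vport->phba;
		uint16_t fcf_index;

		if (!(phba->hba_flag & FCF_RR_INPROG))
			return;

		/* Next eligible candidate from the roundrobin FCF bmask */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

		/* Non-zero return: stop FLOGI to the currently registered FCF;
		 * the helper has either started failover to fcf_index or ended
		 * the roundrobin process.
		 */
		if (lpfc_sli4_fcf_rr_next_proc(vport, fcf_index))
			return;

		/* Zero return: keep retrying FLOGI on the current FCF */
		lpfc_initial_flogi(vport);
	}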
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1756 | int rc; | 1915 | int rc; |
1757 | 1916 | ||
1758 | /* If there is pending FCoE event restart FCF table scan */ | 1917 | /* If there is pending FCoE event restart FCF table scan */ |
1759 | if (lpfc_check_pending_fcoe_event(phba, 0)) { | 1918 | if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { |
1760 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 1919 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
1761 | return; | 1920 | return; |
1762 | } | 1921 | } |
@@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1765 | new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, | 1924 | new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, |
1766 | &next_fcf_index); | 1925 | &next_fcf_index); |
1767 | if (!new_fcf_record) { | 1926 | if (!new_fcf_record) { |
1768 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1927 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
1769 | "2765 Mailbox command READ_FCF_RECORD " | 1928 | "2765 Mailbox command READ_FCF_RECORD " |
1770 | "failed to retrieve a FCF record.\n"); | 1929 | "failed to retrieve a FCF record.\n"); |
1771 | /* Let next new FCF event trigger fast failover */ | 1930 | /* Let next new FCF event trigger fast failover */ |
1772 | spin_lock_irq(&phba->hbalock); | 1931 | spin_lock_irq(&phba->hbalock); |
1773 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1932 | phba->hba_flag &= ~FCF_TS_INPROG; |
1774 | spin_unlock_irq(&phba->hbalock); | 1933 | spin_unlock_irq(&phba->hbalock); |
1775 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 1934 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
1776 | return; | 1935 | return; |
@@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1787 | /* | 1946 | /* |
1788 | * If the fcf record does not match with connect list entries | 1947 | * If the fcf record does not match with connect list entries |
1789 | * read the next entry; otherwise, this is an eligible FCF | 1948 | * read the next entry; otherwise, this is an eligible FCF |
1790 | * record for round robin FCF failover. | 1949 | * record for roundrobin FCF failover. |
1791 | */ | 1950 | */ |
1792 | if (!rc) { | 1951 | if (!rc) { |
1793 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1952 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
1794 | "2781 FCF record (x%x) failed FCF " | 1953 | "2781 FCF (x%x) failed connection " |
1795 | "connection list check, fcf_avail:x%x, " | 1954 | "list check: (x%x/x%x)\n", |
1796 | "fcf_valid:x%x\n", | ||
1797 | bf_get(lpfc_fcf_record_fcf_index, | 1955 | bf_get(lpfc_fcf_record_fcf_index, |
1798 | new_fcf_record), | 1956 | new_fcf_record), |
1799 | bf_get(lpfc_fcf_record_fcf_avail, | 1957 | bf_get(lpfc_fcf_record_fcf_avail, |
@@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1803 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && | 1961 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && |
1804 | lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, | 1962 | lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, |
1805 | new_fcf_record, LPFC_FCOE_IGNORE_VID)) { | 1963 | new_fcf_record, LPFC_FCOE_IGNORE_VID)) { |
1964 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != | ||
1965 | phba->fcf.current_rec.fcf_indx) { | ||
1966 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | ||
1967 | "2862 FCF (x%x) matches property " | ||
1968 | "of in-use FCF (x%x)\n", | ||
1969 | bf_get(lpfc_fcf_record_fcf_index, | ||
1970 | new_fcf_record), | ||
1971 | phba->fcf.current_rec.fcf_indx); | ||
1972 | goto read_next_fcf; | ||
1973 | } | ||
1806 | /* | 1974 | /* |
1807 | * In case the current in-use FCF record becomes | 1975 | * In case the current in-use FCF record becomes |
1808 | * invalid/unavailable during FCF discovery that | 1976 | * invalid/unavailable during FCF discovery that |
@@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1813 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { | 1981 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
1814 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1982 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
1815 | "2835 Invalid in-use FCF " | 1983 | "2835 Invalid in-use FCF " |
1816 | "record (x%x) reported, " | 1984 | "(x%x), enter FCF failover " |
1817 | "entering fast FCF failover " | 1985 | "table scan.\n", |
1818 | "mode scanning.\n", | ||
1819 | phba->fcf.current_rec.fcf_indx); | 1986 | phba->fcf.current_rec.fcf_indx); |
1820 | spin_lock_irq(&phba->hbalock); | 1987 | spin_lock_irq(&phba->hbalock); |
1821 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; | 1988 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; |
@@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1844 | if (phba->fcf.fcf_flag & FCF_IN_USE) { | 2011 | if (phba->fcf.fcf_flag & FCF_IN_USE) { |
1845 | if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, | 2012 | if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, |
1846 | new_fcf_record, vlan_id)) { | 2013 | new_fcf_record, vlan_id)) { |
1847 | phba->fcf.fcf_flag |= FCF_AVAILABLE; | 2014 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == |
1848 | if (phba->fcf.fcf_flag & FCF_REDISC_PEND) | 2015 | phba->fcf.current_rec.fcf_indx) { |
1849 | /* Stop FCF redisc wait timer if pending */ | 2016 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
1850 | __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); | 2017 | if (phba->fcf.fcf_flag & FCF_REDISC_PEND) |
1851 | else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) | 2018 | /* Stop FCF redisc wait timer */ |
1852 | /* If in fast failover, mark it's completed */ | 2019 | __lpfc_sli4_stop_fcf_redisc_wait_timer( |
1853 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; | 2020 | phba); |
1854 | spin_unlock_irq(&phba->hbalock); | 2021 | else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) |
1855 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2022 | /* Fast failover, mark completed */ |
1856 | "2836 The new FCF record (x%x) " | 2023 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; |
1857 | "matches the in-use FCF record " | 2024 | spin_unlock_irq(&phba->hbalock); |
1858 | "(x%x)\n", | 2025 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
1859 | phba->fcf.current_rec.fcf_indx, | 2026 | "2836 New FCF matches in-use " |
2027 | "FCF (x%x)\n", | ||
2028 | phba->fcf.current_rec.fcf_indx); | ||
2029 | goto out; | ||
2030 | } else | ||
2031 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | ||
2032 | "2863 New FCF (x%x) matches " | ||
2033 | "property of in-use FCF (x%x)\n", | ||
1860 | bf_get(lpfc_fcf_record_fcf_index, | 2034 | bf_get(lpfc_fcf_record_fcf_index, |
1861 | new_fcf_record)); | 2035 | new_fcf_record), |
1862 | goto out; | 2036 | phba->fcf.current_rec.fcf_indx); |
1863 | } | 2037 | } |
1864 | /* | 2038 | /* |
1865 | * Read next FCF record from HBA searching for the matching | 2039 | * Read next FCF record from HBA searching for the matching |
@@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1953 | */ | 2127 | */ |
1954 | if (fcf_rec) { | 2128 | if (fcf_rec) { |
1955 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2129 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
1956 | "2840 Update current FCF record " | 2130 | "2840 Update initial FCF candidate " |
1957 | "with initial FCF record (x%x)\n", | 2131 | "with FCF (x%x)\n", |
1958 | bf_get(lpfc_fcf_record_fcf_index, | 2132 | bf_get(lpfc_fcf_record_fcf_index, |
1959 | new_fcf_record)); | 2133 | new_fcf_record)); |
1960 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, | 2134 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
@@ -1984,20 +2158,28 @@ read_next_fcf: | |||
1984 | */ | 2158 | */ |
1985 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { | 2159 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { |
1986 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 2160 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
1987 | "2782 No suitable FCF record " | 2161 | "2782 No suitable FCF found: " |
1988 | "found during this round of " | 2162 | "(x%x/x%x)\n", |
1989 | "post FCF rediscovery scan: " | ||
1990 | "fcf_evt_tag:x%x, fcf_index: " | ||
1991 | "x%x\n", | ||
1992 | phba->fcoe_eventtag_at_fcf_scan, | 2163 | phba->fcoe_eventtag_at_fcf_scan, |
1993 | bf_get(lpfc_fcf_record_fcf_index, | 2164 | bf_get(lpfc_fcf_record_fcf_index, |
1994 | new_fcf_record)); | 2165 | new_fcf_record)); |
2166 | spin_lock_irq(&phba->hbalock); | ||
2167 | if (phba->hba_flag & HBA_DEVLOSS_TMO) { | ||
2168 | phba->hba_flag &= ~FCF_TS_INPROG; | ||
2169 | spin_unlock_irq(&phba->hbalock); | ||
2170 | /* Unregister in-use FCF and rescan */ | ||
2171 | lpfc_printf_log(phba, KERN_INFO, | ||
2172 | LOG_FIP, | ||
2173 | "2864 On devloss tmo " | ||
2174 | "unreg in-use FCF and " | ||
2175 | "rescan FCF table\n"); | ||
2176 | lpfc_unregister_fcf_rescan(phba); | ||
2177 | return; | ||
2178 | } | ||
1995 | /* | 2179 | /* |
1996 | * Let next new FCF event trigger fast | 2180 | * Let next new FCF event trigger fast failover |
1997 | * failover | ||
1998 | */ | 2181 | */ |
1999 | spin_lock_irq(&phba->hbalock); | 2182 | phba->hba_flag &= ~FCF_TS_INPROG; |
2000 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
2001 | spin_unlock_irq(&phba->hbalock); | 2183 | spin_unlock_irq(&phba->hbalock); |
2002 | return; | 2184 | return; |
2003 | } | 2185 | } |
@@ -2015,9 +2197,8 @@ read_next_fcf: | |||
2015 | 2197 | ||
2016 | /* Replace in-use record with the new record */ | 2198 | /* Replace in-use record with the new record */ |
2017 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2199 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2018 | "2842 Replace the current in-use " | 2200 | "2842 Replace in-use FCF (x%x) " |
2019 | "FCF record (x%x) with failover FCF " | 2201 | "with failover FCF (x%x)\n", |
2020 | "record (x%x)\n", | ||
2021 | phba->fcf.current_rec.fcf_indx, | 2202 | phba->fcf.current_rec.fcf_indx, |
2022 | phba->fcf.failover_rec.fcf_indx); | 2203 | phba->fcf.failover_rec.fcf_indx); |
2023 | memcpy(&phba->fcf.current_rec, | 2204 | memcpy(&phba->fcf.current_rec, |
@@ -2029,15 +2210,8 @@ read_next_fcf: | |||
2029 | * FCF failover. | 2210 | * FCF failover. |
2030 | */ | 2211 | */ |
2031 | spin_lock_irq(&phba->hbalock); | 2212 | spin_lock_irq(&phba->hbalock); |
2032 | phba->fcf.fcf_flag &= | 2213 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; |
2033 | ~(FCF_REDISC_FOV | FCF_REDISC_RRU); | ||
2034 | spin_unlock_irq(&phba->hbalock); | 2214 | spin_unlock_irq(&phba->hbalock); |
2035 | /* | ||
2036 | * Set up the initial registered FCF index for FLOGI | ||
2037 | * round robin FCF failover. | ||
2038 | */ | ||
2039 | phba->fcf.fcf_rr_init_indx = | ||
2040 | phba->fcf.failover_rec.fcf_indx; | ||
2041 | /* Register to the new FCF record */ | 2215 | /* Register to the new FCF record */ |
2042 | lpfc_register_fcf(phba); | 2216 | lpfc_register_fcf(phba); |
2043 | } else { | 2217 | } else { |
@@ -2069,28 +2243,6 @@ read_next_fcf: | |||
2069 | LPFC_FCOE_FCF_GET_FIRST); | 2243 | LPFC_FCOE_FCF_GET_FIRST); |
2070 | return; | 2244 | return; |
2071 | } | 2245 | } |
2072 | |||
2073 | /* | ||
2074 | * Otherwise, initial scan or post linkdown rescan, | ||
2075 | * register with the best FCF record found so far | ||
2076 | * through the FCF scanning process. | ||
2077 | */ | ||
2078 | |||
2079 | /* | ||
2080 | * Mark the initial FCF discovery completed and | ||
2081 | * the start of the first round of the roundrobin | ||
2082 | * FCF failover. | ||
2083 | */ | ||
2084 | spin_lock_irq(&phba->hbalock); | ||
2085 | phba->fcf.fcf_flag &= | ||
2086 | ~(FCF_INIT_DISC | FCF_REDISC_RRU); | ||
2087 | spin_unlock_irq(&phba->hbalock); | ||
2088 | /* | ||
2089 | * Set up the initial registered FCF index for FLOGI | ||
2090 | * round robin FCF failover | ||
2091 | */ | ||
2092 | phba->fcf.fcf_rr_init_indx = | ||
2093 | phba->fcf.current_rec.fcf_indx; | ||
2094 | /* Register to the new FCF record */ | 2246 | /* Register to the new FCF record */ |
2095 | lpfc_register_fcf(phba); | 2247 | lpfc_register_fcf(phba); |
2096 | } | 2248 | } |
@@ -2106,11 +2258,11 @@ out: | |||
2106 | } | 2258 | } |
2107 | 2259 | ||
2108 | /** | 2260 | /** |
2109 | * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler | 2261 | * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler |
2110 | * @phba: pointer to lpfc hba data structure. | 2262 | * @phba: pointer to lpfc hba data structure. |
2111 | * @mboxq: pointer to mailbox object. | 2263 | * @mboxq: pointer to mailbox object. |
2112 | * | 2264 | * |
2113 | * This is the callback function for FLOGI failure round robin FCF failover | 2265 | * This is the callback function for FLOGI failure roundrobin FCF failover |
2114 | * read FCF record mailbox command from the eligible FCF record bmask for | 2266 | * read FCF record mailbox command from the eligible FCF record bmask for |
2115 | * performing the failover. If the FCF read back is not valid/available, it | 2267 | * performing the failover. If the FCF read back is not valid/available, it |
2116 | * falls back to retrying FLOGI to the currently registered FCF again. | 2268 | * falls back to retrying FLOGI to the currently registered FCF again. |
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2125 | { | 2277 | { |
2126 | struct fcf_record *new_fcf_record; | 2278 | struct fcf_record *new_fcf_record; |
2127 | uint32_t boot_flag, addr_mode; | 2279 | uint32_t boot_flag, addr_mode; |
2128 | uint16_t next_fcf_index; | 2280 | uint16_t next_fcf_index, fcf_index; |
2129 | uint16_t current_fcf_index; | 2281 | uint16_t current_fcf_index; |
2130 | uint16_t vlan_id; | 2282 | uint16_t vlan_id; |
2283 | int rc; | ||
2131 | 2284 | ||
2132 | /* If link state is not up, stop the round robin failover process */ | 2285 | /* If link state is not up, stop the roundrobin failover process */ |
2133 | if (phba->link_state < LPFC_LINK_UP) { | 2286 | if (phba->link_state < LPFC_LINK_UP) { |
2134 | spin_lock_irq(&phba->hbalock); | 2287 | spin_lock_irq(&phba->hbalock); |
2135 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; | 2288 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
2289 | phba->hba_flag &= ~FCF_RR_INPROG; | ||
2136 | spin_unlock_irq(&phba->hbalock); | 2290 | spin_unlock_irq(&phba->hbalock); |
2137 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 2291 | goto out; |
2138 | return; | ||
2139 | } | 2292 | } |
2140 | 2293 | ||
2141 | /* Parse the FCF record from the non-embedded mailbox command */ | 2294 | /* Parse the FCF record from the non-embedded mailbox command */ |
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2145 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 2298 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2146 | "2766 Mailbox command READ_FCF_RECORD " | 2299 | "2766 Mailbox command READ_FCF_RECORD " |
2147 | "failed to retrieve a FCF record.\n"); | 2300 | "failed to retrieve a FCF record.\n"); |
2148 | goto out; | 2301 | goto error_out; |
2149 | } | 2302 | } |
2150 | 2303 | ||
2151 | /* Get the needed parameters from FCF record */ | 2304 | /* Get the needed parameters from FCF record */ |
2152 | lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, | 2305 | rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, |
2153 | &addr_mode, &vlan_id); | 2306 | &addr_mode, &vlan_id); |
2154 | 2307 | ||
2155 | /* Log the FCF record information if turned on */ | 2308 | /* Log the FCF record information if turned on */ |
2156 | lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, | 2309 | lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, |
2157 | next_fcf_index); | 2310 | next_fcf_index); |
2158 | 2311 | ||
2312 | fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); | ||
2313 | if (!rc) { | ||
2314 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
2315 | "2848 Remove ineligible FCF (x%x) from " | ||
2316 | 				"roundrobin bmask\n", fcf_index); | ||
2317 | /* Clear roundrobin bmask bit for ineligible FCF */ | ||
2318 | lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); | ||
2319 | /* Perform next round of roundrobin FCF failover */ | ||
2320 | fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); | ||
2321 | rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); | ||
2322 | if (rc) | ||
2323 | goto out; | ||
2324 | goto error_out; | ||
2325 | } | ||
2326 | |||
2327 | if (fcf_index == phba->fcf.current_rec.fcf_indx) { | ||
2328 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
2329 | "2760 Perform FLOGI roundrobin FCF failover: " | ||
2330 | "FCF (x%x) back to FCF (x%x)\n", | ||
2331 | phba->fcf.current_rec.fcf_indx, fcf_index); | ||
2332 | /* Wait 500 ms before retrying FLOGI to current FCF */ | ||
2333 | msleep(500); | ||
2334 | lpfc_initial_flogi(phba->pport); | ||
2335 | goto out; | ||
2336 | } | ||
2337 | |||
2159 | /* Upload new FCF record to the failover FCF record */ | 2338 | /* Upload new FCF record to the failover FCF record */ |
2160 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2339 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2161 | "2834 Update the current FCF record (x%x) " | 2340 | "2834 Update current FCF (x%x) with new FCF (x%x)\n", |
2162 | "with the next FCF record (x%x)\n", | 2341 | phba->fcf.failover_rec.fcf_indx, fcf_index); |
2163 | phba->fcf.failover_rec.fcf_indx, | ||
2164 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); | ||
2165 | spin_lock_irq(&phba->hbalock); | 2342 | spin_lock_irq(&phba->hbalock); |
2166 | __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, | 2343 | __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, |
2167 | new_fcf_record, addr_mode, vlan_id, | 2344 | new_fcf_record, addr_mode, vlan_id, |
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2178 | sizeof(struct lpfc_fcf_rec)); | 2355 | sizeof(struct lpfc_fcf_rec)); |
2179 | 2356 | ||
2180 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2357 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2181 | "2783 FLOGI round robin FCF failover from FCF " | 2358 | "2783 Perform FLOGI roundrobin FCF failover: FCF " |
2182 | "(x%x) to FCF (x%x).\n", | 2359 | "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); |
2183 | current_fcf_index, | ||
2184 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); | ||
2185 | 2360 | ||
2361 | error_out: | ||
2362 | lpfc_register_fcf(phba); | ||
2186 | out: | 2363 | out: |
2187 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 2364 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2188 | lpfc_register_fcf(phba); | ||
2189 | } | 2365 | } |
2190 | 2366 | ||
2191 | /** | 2367 | /** |
@@ -2194,10 +2370,10 @@ out: | |||
2194 | * @mboxq: pointer to mailbox object. | 2370 | * @mboxq: pointer to mailbox object. |
2195 | * | 2371 | * |
2196 | * This is the callback function of read FCF record mailbox command for | 2372 | * This is the callback function of read FCF record mailbox command for |
2197 | * updating the eligible FCF bmask for FLOGI failure round robin FCF | 2373 | * updating the eligible FCF bmask for FLOGI failure roundrobin FCF |
2198 | * failover when a new FCF event happened. If the FCF read back is | 2374 | * failover when a new FCF event happened. If the FCF read back is |
2199 | * valid/available and it passes the connection list check, it updates | 2375 | * valid/available and it passes the connection list check, it updates |
2200 | * the bmask for the eligible FCF record for round robin failover. | 2376 | * the bmask for the eligible FCF record for roundrobin failover. |
2201 | */ | 2377 | */ |
2202 | void | 2378 | void |
2203 | lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | 2379 | lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
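lpfc_mbx_cmpl_read_fcf_rec() maintains the eligible-FCF bmask consumed by the roundrobin failover shown above. The bmask helpers themselves (lpfc_sli4_fcf_rr_index_set/clear and lpfc_sli4_fcf_rr_next_index_get) live in lpfc_sli.c and are not part of this hunk; the sketch below shows the kind of bookkeeping they perform over phba->fcf.fcf_rr_bmask, which the lpfc_init.c change further down allocates against LPFC_SLI4_FCF_TBL_INDX_MAX. The wrap-around and range-check details are assumptions, not copied from the driver.

	/* Simplified sketches of the roundrobin bmask helpers; the real
	 * lpfc_sli.c implementations also log errors on bad indexes.
	 */
	static int sketch_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
	{
		if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return -EINVAL;
		set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
		return 0;
	}

	static void sketch_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
	{
		if (fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX)
			clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
	}

	static uint16_t sketch_fcf_rr_next_index_get(struct lpfc_hba *phba)
	{
		unsigned long next;

		/* Search after the currently registered FCF, then wrap */
		next = find_next_bit(phba->fcf.fcf_rr_bmask,
				     LPFC_SLI4_FCF_TBL_INDX_MAX,
				     phba->fcf.current_rec.fcf_indx + 1);
		if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			next = find_first_bit(phba->fcf.fcf_rr_bmask,
					      LPFC_SLI4_FCF_TBL_INDX_MAX);
		if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		return (uint16_t)next;
	}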
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2639 | * and get the FCF Table. | 2815 | * and get the FCF Table. |
2640 | */ | 2816 | */ |
2641 | spin_lock_irq(&phba->hbalock); | 2817 | spin_lock_irq(&phba->hbalock); |
2642 | if (phba->hba_flag & FCF_DISC_INPROGRESS) { | 2818 | if (phba->hba_flag & FCF_TS_INPROG) { |
2643 | spin_unlock_irq(&phba->hbalock); | 2819 | spin_unlock_irq(&phba->hbalock); |
2644 | return; | 2820 | return; |
2645 | } | 2821 | } |
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport) | |||
3906 | LPFC_MBOXQ_t *mbox; | 4082 | LPFC_MBOXQ_t *mbox; |
3907 | int rc; | 4083 | int rc; |
3908 | 4084 | ||
4085 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
4086 | lpfc_sli4_unreg_all_rpis(vport); | ||
4087 | return; | ||
4088 | } | ||
4089 | |||
3909 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 4090 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
3910 | if (mbox) { | 4091 | if (mbox) { |
3911 | lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); | 4092 | lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); |
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
3992 | } | 4173 | } |
3993 | 4174 | ||
3994 | spin_lock_irq(&phba->hbalock); | 4175 | spin_lock_irq(&phba->hbalock); |
4176 | /* Cleanup REG_LOGIN completions which are not yet processed */ | ||
4177 | list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { | ||
4178 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || | ||
4179 | (ndlp != (struct lpfc_nodelist *) mb->context2)) | ||
4180 | continue; | ||
4181 | |||
4182 | mb->context2 = NULL; | ||
4183 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
4184 | } | ||
4185 | |||
3995 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 4186 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
3996 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && | 4187 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
3997 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 4188 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) | |||
5170 | if (ndlp) | 5361 | if (ndlp) |
5171 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); | 5362 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); |
5172 | lpfc_cleanup_pending_mbox(vports[i]); | 5363 | lpfc_cleanup_pending_mbox(vports[i]); |
5364 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
5365 | lpfc_sli4_unreg_all_rpis(vports[i]); | ||
5173 | lpfc_mbx_unreg_vpi(vports[i]); | 5366 | lpfc_mbx_unreg_vpi(vports[i]); |
5174 | shost = lpfc_shost_from_vport(vports[i]); | 5367 | shost = lpfc_shost_from_vport(vports[i]); |
5175 | spin_lock_irq(shost->host_lock); | 5368 | spin_lock_irq(shost->host_lock); |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index a631647051d9..9b8333456465 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */ | |||
861 | uint32_t crcCnt; | 861 | uint32_t crcCnt; |
862 | } RPS_RSP; | 862 | } RPS_RSP; |
863 | 863 | ||
864 | struct RLS { /* Structure is in Big Endian format */ | ||
865 | uint32_t rls; | ||
866 | #define rls_rsvd_SHIFT 24 | ||
867 | #define rls_rsvd_MASK 0x000000ff | ||
868 | #define rls_rsvd_WORD rls | ||
869 | #define rls_did_SHIFT 0 | ||
870 | #define rls_did_MASK 0x00ffffff | ||
871 | #define rls_did_WORD rls | ||
872 | }; | ||
873 | |||
874 | struct RLS_RSP { /* Structure is in Big Endian format */ | ||
875 | uint32_t linkFailureCnt; | ||
876 | uint32_t lossSyncCnt; | ||
877 | uint32_t lossSignalCnt; | ||
878 | uint32_t primSeqErrCnt; | ||
879 | uint32_t invalidXmitWord; | ||
880 | uint32_t crcCnt; | ||
881 | }; | ||
882 | |||
883 | struct RTV_RSP { /* Structure is in Big Endian format */ | ||
884 | uint32_t ratov; | ||
885 | uint32_t edtov; | ||
886 | uint32_t qtov; | ||
887 | #define qtov_rsvd0_SHIFT 28 | ||
888 | #define qtov_rsvd0_MASK 0x0000000f | ||
889 | #define qtov_rsvd0_WORD qtov /* reserved */ | ||
890 | #define qtov_edtovres_SHIFT 27 | ||
891 | #define qtov_edtovres_MASK 0x00000001 | ||
892 | #define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */ | ||
893 | #define qtov_rsvd1_SHIFT 19 | ||
894 | #define qtov_rsvd1_MASK 0x0000003f | ||
895 | #define qtov_rsvd1_WORD qtov /* reserved */ | ||
896 | #define qtov_rttov_SHIFT 18 | ||
897 | #define qtov_rttov_MASK 0x00000001 | ||
898 | #define qtov_rttov_WORD qtov /* R_T_TOV value */ | ||
899 | #define qtov_rsvd2_SHIFT 0 | ||
900 | #define qtov_rsvd2_MASK 0x0003ffff | ||
901 | #define qtov_rsvd2_WORD qtov /* reserved */ | ||
902 | }; | ||
903 | |||
904 | |||
864 | typedef struct _RPL { /* Structure is in Big Endian format */ | 905 | typedef struct _RPL { /* Structure is in Big Endian format */ |
865 | uint32_t maxsize; | 906 | uint32_t maxsize; |
866 | uint32_t index; | 907 | uint32_t index; |
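The RLS_RSP and RTV_RSP structures added to lpfc_hw.h above describe the payloads of the Read Link Status and Read Timeout Value ELS accept responses; as the comments note, they are big-endian on the wire. As a rough illustration of how an RLS accept might be populated from link error counters (the actual handler is in lpfc_els.c, also part of this commit but not shown here; the counter holder below is a made-up placeholder):

	/* Hypothetical counter holder -- stands in for whatever the
	 * READ_LNK_STAT mailbox completion actually provides.
	 */
	struct sketch_link_stats {
		uint32_t link_failure_cnt;
		uint32_t loss_sync_cnt;
		uint32_t loss_signal_cnt;
		uint32_t prim_seq_err_cnt;
		uint32_t invalid_xmit_word;
		uint32_t crc_cnt;
	};

	/* Sketch: populate the big-endian RLS accept payload */
	static void sketch_fill_rls_rsp(struct RLS_RSP *rls_rsp,
					const struct sketch_link_stats *st)
	{
		rls_rsp->linkFailureCnt = cpu_to_be32(st->link_failure_cnt);
		rls_rsp->lossSyncCnt = cpu_to_be32(st->loss_sync_cnt);
		rls_rsp->lossSignalCnt = cpu_to_be32(st->loss_signal_cnt);
		rls_rsp->primSeqErrCnt = cpu_to_be32(st->prim_seq_err_cnt);
		rls_rsp->invalidXmitWord = cpu_to_be32(st->invalid_xmit_word);
		rls_rsp->crcCnt = cpu_to_be32(st->crc_cnt);
	}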
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index bbdcf96800f6..6e4bc34e1d0d 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -424,79 +424,6 @@ struct lpfc_rcqe { | |||
424 | #define FCOE_SOFn3 0x36 | 424 | #define FCOE_SOFn3 0x36 |
425 | }; | 425 | }; |
426 | 426 | ||
427 | struct lpfc_wqe_generic{ | ||
428 | struct ulp_bde64 bde; | ||
429 | uint32_t word3; | ||
430 | uint32_t word4; | ||
431 | uint32_t word5; | ||
432 | uint32_t word6; | ||
433 | #define lpfc_wqe_gen_context_SHIFT 16 | ||
434 | #define lpfc_wqe_gen_context_MASK 0x0000FFFF | ||
435 | #define lpfc_wqe_gen_context_WORD word6 | ||
436 | #define lpfc_wqe_gen_xri_SHIFT 0 | ||
437 | #define lpfc_wqe_gen_xri_MASK 0x0000FFFF | ||
438 | #define lpfc_wqe_gen_xri_WORD word6 | ||
439 | uint32_t word7; | ||
440 | #define lpfc_wqe_gen_lnk_SHIFT 23 | ||
441 | #define lpfc_wqe_gen_lnk_MASK 0x00000001 | ||
442 | #define lpfc_wqe_gen_lnk_WORD word7 | ||
443 | #define lpfc_wqe_gen_erp_SHIFT 22 | ||
444 | #define lpfc_wqe_gen_erp_MASK 0x00000001 | ||
445 | #define lpfc_wqe_gen_erp_WORD word7 | ||
446 | #define lpfc_wqe_gen_pu_SHIFT 20 | ||
447 | #define lpfc_wqe_gen_pu_MASK 0x00000003 | ||
448 | #define lpfc_wqe_gen_pu_WORD word7 | ||
449 | #define lpfc_wqe_gen_class_SHIFT 16 | ||
450 | #define lpfc_wqe_gen_class_MASK 0x00000007 | ||
451 | #define lpfc_wqe_gen_class_WORD word7 | ||
452 | #define lpfc_wqe_gen_command_SHIFT 8 | ||
453 | #define lpfc_wqe_gen_command_MASK 0x000000FF | ||
454 | #define lpfc_wqe_gen_command_WORD word7 | ||
455 | #define lpfc_wqe_gen_status_SHIFT 4 | ||
456 | #define lpfc_wqe_gen_status_MASK 0x0000000F | ||
457 | #define lpfc_wqe_gen_status_WORD word7 | ||
458 | #define lpfc_wqe_gen_ct_SHIFT 2 | ||
459 | #define lpfc_wqe_gen_ct_MASK 0x00000003 | ||
460 | #define lpfc_wqe_gen_ct_WORD word7 | ||
461 | uint32_t abort_tag; | ||
462 | uint32_t word9; | ||
463 | #define lpfc_wqe_gen_request_tag_SHIFT 0 | ||
464 | #define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF | ||
465 | #define lpfc_wqe_gen_request_tag_WORD word9 | ||
466 | uint32_t word10; | ||
467 | #define lpfc_wqe_gen_ccp_SHIFT 24 | ||
468 | #define lpfc_wqe_gen_ccp_MASK 0x000000FF | ||
469 | #define lpfc_wqe_gen_ccp_WORD word10 | ||
470 | #define lpfc_wqe_gen_ccpe_SHIFT 23 | ||
471 | #define lpfc_wqe_gen_ccpe_MASK 0x00000001 | ||
472 | #define lpfc_wqe_gen_ccpe_WORD word10 | ||
473 | #define lpfc_wqe_gen_pv_SHIFT 19 | ||
474 | #define lpfc_wqe_gen_pv_MASK 0x00000001 | ||
475 | #define lpfc_wqe_gen_pv_WORD word10 | ||
476 | #define lpfc_wqe_gen_pri_SHIFT 16 | ||
477 | #define lpfc_wqe_gen_pri_MASK 0x00000007 | ||
478 | #define lpfc_wqe_gen_pri_WORD word10 | ||
479 | uint32_t word11; | ||
480 | #define lpfc_wqe_gen_cq_id_SHIFT 16 | ||
481 | #define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF | ||
482 | #define lpfc_wqe_gen_cq_id_WORD word11 | ||
483 | #define LPFC_WQE_CQ_ID_DEFAULT 0xffff | ||
484 | #define lpfc_wqe_gen_wqec_SHIFT 7 | ||
485 | #define lpfc_wqe_gen_wqec_MASK 0x00000001 | ||
486 | #define lpfc_wqe_gen_wqec_WORD word11 | ||
487 | #define ELS_ID_FLOGI 3 | ||
488 | #define ELS_ID_FDISC 2 | ||
489 | #define ELS_ID_LOGO 1 | ||
490 | #define ELS_ID_DEFAULT 0 | ||
491 | #define lpfc_wqe_gen_els_id_SHIFT 4 | ||
492 | #define lpfc_wqe_gen_els_id_MASK 0x00000003 | ||
493 | #define lpfc_wqe_gen_els_id_WORD word11 | ||
494 | #define lpfc_wqe_gen_cmd_type_SHIFT 0 | ||
495 | #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F | ||
496 | #define lpfc_wqe_gen_cmd_type_WORD word11 | ||
497 | uint32_t payload[4]; | ||
498 | }; | ||
499 | |||
500 | struct lpfc_rqe { | 427 | struct lpfc_rqe { |
501 | uint32_t address_hi; | 428 | uint32_t address_hi; |
502 | uint32_t address_lo; | 429 | uint32_t address_lo; |
@@ -2279,9 +2206,36 @@ struct wqe_common { | |||
2279 | #define wqe_reqtag_MASK 0x0000FFFF | 2206 | #define wqe_reqtag_MASK 0x0000FFFF |
2280 | #define wqe_reqtag_WORD word9 | 2207 | #define wqe_reqtag_WORD word9 |
2281 | #define wqe_rcvoxid_SHIFT 16 | 2208 | #define wqe_rcvoxid_SHIFT 16 |
2282 | #define wqe_rcvoxid_MASK 0x0000FFFF | 2209 | #define wqe_rcvoxid_MASK 0x0000FFFF |
2283 | #define wqe_rcvoxid_WORD word9 | 2210 | #define wqe_rcvoxid_WORD word9 |
2284 | uint32_t word10; | 2211 | uint32_t word10; |
2212 | #define wqe_ebde_cnt_SHIFT 0 | ||
2213 | #define wqe_ebde_cnt_MASK 0x00000007 | ||
2214 | #define wqe_ebde_cnt_WORD word10 | ||
2215 | #define wqe_lenloc_SHIFT 7 | ||
2216 | #define wqe_lenloc_MASK 0x00000003 | ||
2217 | #define wqe_lenloc_WORD word10 | ||
2218 | #define LPFC_WQE_LENLOC_NONE 0 | ||
2219 | #define LPFC_WQE_LENLOC_WORD3 1 | ||
2220 | #define LPFC_WQE_LENLOC_WORD12 2 | ||
2221 | #define LPFC_WQE_LENLOC_WORD4 3 | ||
2222 | #define wqe_qosd_SHIFT 9 | ||
2223 | #define wqe_qosd_MASK 0x00000001 | ||
2224 | #define wqe_qosd_WORD word10 | ||
2225 | #define wqe_xbl_SHIFT 11 | ||
2226 | #define wqe_xbl_MASK 0x00000001 | ||
2227 | #define wqe_xbl_WORD word10 | ||
2228 | #define wqe_iod_SHIFT 13 | ||
2229 | #define wqe_iod_MASK 0x00000001 | ||
2230 | #define wqe_iod_WORD word10 | ||
2231 | #define LPFC_WQE_IOD_WRITE 0 | ||
2232 | #define LPFC_WQE_IOD_READ 1 | ||
2233 | #define wqe_dbde_SHIFT 14 | ||
2234 | #define wqe_dbde_MASK 0x00000001 | ||
2235 | #define wqe_dbde_WORD word10 | ||
2236 | #define wqe_wqes_SHIFT 15 | ||
2237 | #define wqe_wqes_MASK 0x00000001 | ||
2238 | #define wqe_wqes_WORD word10 | ||
2285 | #define wqe_pri_SHIFT 16 | 2239 | #define wqe_pri_SHIFT 16 |
2286 | #define wqe_pri_MASK 0x00000007 | 2240 | #define wqe_pri_MASK 0x00000007 |
2287 | #define wqe_pri_WORD word10 | 2241 | #define wqe_pri_WORD word10 |
@@ -2295,18 +2249,26 @@ struct wqe_common { | |||
2295 | #define wqe_ccpe_MASK 0x00000001 | 2249 | #define wqe_ccpe_MASK 0x00000001 |
2296 | #define wqe_ccpe_WORD word10 | 2250 | #define wqe_ccpe_WORD word10 |
2297 | #define wqe_ccp_SHIFT 24 | 2251 | #define wqe_ccp_SHIFT 24 |
2298 | #define wqe_ccp_MASK 0x000000ff | 2252 | #define wqe_ccp_MASK 0x000000ff |
2299 | #define wqe_ccp_WORD word10 | 2253 | #define wqe_ccp_WORD word10 |
2300 | uint32_t word11; | 2254 | uint32_t word11; |
2301 | #define wqe_cmd_type_SHIFT 0 | 2255 | #define wqe_cmd_type_SHIFT 0 |
2302 | #define wqe_cmd_type_MASK 0x0000000f | 2256 | #define wqe_cmd_type_MASK 0x0000000f |
2303 | #define wqe_cmd_type_WORD word11 | 2257 | #define wqe_cmd_type_WORD word11 |
2304 | #define wqe_wqec_SHIFT 7 | 2258 | #define wqe_els_id_SHIFT 4 |
2305 | #define wqe_wqec_MASK 0x00000001 | 2259 | #define wqe_els_id_MASK 0x00000003 |
2306 | #define wqe_wqec_WORD word11 | 2260 | #define wqe_els_id_WORD word11 |
2307 | #define wqe_cqid_SHIFT 16 | 2261 | #define LPFC_ELS_ID_FLOGI 3 |
2308 | #define wqe_cqid_MASK 0x0000ffff | 2262 | #define LPFC_ELS_ID_FDISC 2 |
2309 | #define wqe_cqid_WORD word11 | 2263 | #define LPFC_ELS_ID_LOGO 1 |
2264 | #define LPFC_ELS_ID_DEFAULT 0 | ||
2265 | #define wqe_wqec_SHIFT 7 | ||
2266 | #define wqe_wqec_MASK 0x00000001 | ||
2267 | #define wqe_wqec_WORD word11 | ||
2268 | #define wqe_cqid_SHIFT 16 | ||
2269 | #define wqe_cqid_MASK 0x0000ffff | ||
2270 | #define wqe_cqid_WORD word11 | ||
2271 | #define LPFC_WQE_CQ_ID_DEFAULT 0xffff | ||
2310 | }; | 2272 | }; |
2311 | 2273 | ||
2312 | struct wqe_did { | 2274 | struct wqe_did { |
@@ -2325,6 +2287,15 @@ struct wqe_did { | |||
2325 | #define wqe_xmit_bls_xo_WORD word5 | 2287 | #define wqe_xmit_bls_xo_WORD word5 |
2326 | }; | 2288 | }; |
2327 | 2289 | ||
2290 | struct lpfc_wqe_generic{ | ||
2291 | struct ulp_bde64 bde; | ||
2292 | uint32_t word3; | ||
2293 | uint32_t word4; | ||
2294 | uint32_t word5; | ||
2295 | struct wqe_common wqe_com; | ||
2296 | uint32_t payload[4]; | ||
2297 | }; | ||
2298 | |||
2328 | struct els_request64_wqe { | 2299 | struct els_request64_wqe { |
2329 | struct ulp_bde64 bde; | 2300 | struct ulp_bde64 bde; |
2330 | uint32_t payload_len; | 2301 | uint32_t payload_len; |
@@ -2356,9 +2327,9 @@ struct els_request64_wqe { | |||
2356 | 2327 | ||
2357 | struct xmit_els_rsp64_wqe { | 2328 | struct xmit_els_rsp64_wqe { |
2358 | struct ulp_bde64 bde; | 2329 | struct ulp_bde64 bde; |
2359 | uint32_t rsvd3; | 2330 | uint32_t response_payload_len; |
2360 | uint32_t rsvd4; | 2331 | uint32_t rsvd4; |
2361 | struct wqe_did wqe_dest; | 2332 | struct wqe_did wqe_dest; |
2362 | struct wqe_common wqe_com; /* words 6-11 */ | 2333 | struct wqe_common wqe_com; /* words 6-11 */ |
2363 | uint32_t rsvd_12_15[4]; | 2334 | uint32_t rsvd_12_15[4]; |
2364 | }; | 2335 | }; |
@@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl { | |||
2427 | 2398 | ||
2428 | struct xmit_seq64_wqe { | 2399 | struct xmit_seq64_wqe { |
2429 | struct ulp_bde64 bde; | 2400 | struct ulp_bde64 bde; |
2430 | uint32_t paylaod_offset; | 2401 | uint32_t rsvd3; |
2431 | uint32_t relative_offset; | 2402 | uint32_t relative_offset; |
2432 | struct wqe_rctl_dfctl wge_ctl; | 2403 | struct wqe_rctl_dfctl wge_ctl; |
2433 | struct wqe_common wqe_com; /* words 6-11 */ | 2404 | struct wqe_common wqe_com; /* words 6-11 */ |
@@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe { | |||
2437 | }; | 2408 | }; |
2438 | struct xmit_bcast64_wqe { | 2409 | struct xmit_bcast64_wqe { |
2439 | struct ulp_bde64 bde; | 2410 | struct ulp_bde64 bde; |
2440 | uint32_t paylaod_len; | 2411 | uint32_t seq_payload_len; |
2441 | uint32_t rsvd4; | 2412 | uint32_t rsvd4; |
2442 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ | 2413 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ |
2443 | struct wqe_common wqe_com; /* words 6-11 */ | 2414 | struct wqe_common wqe_com; /* words 6-11 */ |
@@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe { | |||
2446 | 2417 | ||
2447 | struct gen_req64_wqe { | 2418 | struct gen_req64_wqe { |
2448 | struct ulp_bde64 bde; | 2419 | struct ulp_bde64 bde; |
2449 | uint32_t command_len; | 2420 | uint32_t request_payload_len; |
2450 | uint32_t payload_len; | 2421 | uint32_t relative_offset; |
2451 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ | 2422 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ |
2452 | struct wqe_common wqe_com; /* words 6-11 */ | 2423 | struct wqe_common wqe_com; /* words 6-11 */ |
2453 | uint32_t rsvd_12_15[4]; | 2424 | uint32_t rsvd_12_15[4]; |
@@ -2480,7 +2451,7 @@ struct abort_cmd_wqe { | |||
2480 | 2451 | ||
2481 | struct fcp_iwrite64_wqe { | 2452 | struct fcp_iwrite64_wqe { |
2482 | struct ulp_bde64 bde; | 2453 | struct ulp_bde64 bde; |
2483 | uint32_t payload_len; | 2454 | uint32_t payload_offset_len; |
2484 | uint32_t total_xfer_len; | 2455 | uint32_t total_xfer_len; |
2485 | uint32_t initial_xfer_len; | 2456 | uint32_t initial_xfer_len; |
2486 | struct wqe_common wqe_com; /* words 6-11 */ | 2457 | struct wqe_common wqe_com; /* words 6-11 */ |
@@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe { | |||
2489 | 2460 | ||
2490 | struct fcp_iread64_wqe { | 2461 | struct fcp_iread64_wqe { |
2491 | struct ulp_bde64 bde; | 2462 | struct ulp_bde64 bde; |
2492 | uint32_t payload_len; /* word 3 */ | 2463 | uint32_t payload_offset_len; /* word 3 */ |
2493 | uint32_t total_xfer_len; /* word 4 */ | 2464 | uint32_t total_xfer_len; /* word 4 */ |
2494 | uint32_t rsrvd5; /* word 5 */ | 2465 | uint32_t rsrvd5; /* word 5 */ |
2495 | struct wqe_common wqe_com; /* words 6-11 */ | 2466 | struct wqe_common wqe_com; /* words 6-11 */ |
@@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe { | |||
2497 | }; | 2468 | }; |
2498 | 2469 | ||
2499 | struct fcp_icmnd64_wqe { | 2470 | struct fcp_icmnd64_wqe { |
2500 | struct ulp_bde64 bde; /* words 0-2 */ | 2471 | struct ulp_bde64 bde; /* words 0-2 */ |
2501 | uint32_t rsrvd[3]; /* words 3-5 */ | 2472 | uint32_t rsrvd3; /* word 3 */ |
2473 | uint32_t rsrvd4; /* word 4 */ | ||
2474 | uint32_t rsrvd5; /* word 5 */ | ||
2502 | struct wqe_common wqe_com; /* words 6-11 */ | 2475 | struct wqe_common wqe_com; /* words 6-11 */ |
2503 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | 2476 | uint32_t rsvd_12_15[4]; /* word 12-15 */ |
2504 | }; | 2477 | }; |
2505 | 2478 | ||
2506 | 2479 | ||
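The lpfc_hw4.h changes above retire the standalone lpfc_wqe_gen_* field macros and fold the generic WQE controls into words 10 and 11 of the shared wqe_common, so every WQE type is now programmed through its wqe_com member (the lpfc_sli.c hunk below already shows bf_set(wqe_wqec, &wqe->generic.wqe_com, 1)). A minimal sketch of setting a few of the relocated fields, with illustrative values only and not taken from any specific call site:

	/* Sketch: program common WQE control bits through the shared
	 * wqe_com words using the field macros defined above.
	 */
	static void sketch_wqe_common_setup(union lpfc_wqe *wqe, uint16_t reqtag)
	{
		bf_set(wqe_reqtag, &wqe->generic.wqe_com, reqtag);            /* word 9  */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);                   /* word 10 */
		bf_set(wqe_iod, &wqe->generic.wqe_com, LPFC_WQE_IOD_READ);    /* word 10 */
		bf_set(wqe_lenloc, &wqe->generic.wqe_com, LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);                   /* word 11 */
		bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	}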
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 295c7ddb36c1..b3065791f303 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) | |||
813 | 813 | ||
814 | return 0; | 814 | return 0; |
815 | } | 815 | } |
816 | |||
816 | /** | 817 | /** |
817 | * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset | 818 | * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset |
818 | * @phba: pointer to lpfc HBA data structure. | 819 | * @phba: pointer to lpfc HBA data structure. |
@@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) | |||
2234 | void | 2235 | void |
2235 | __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) | 2236 | __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) |
2236 | { | 2237 | { |
2237 | /* Clear pending FCF rediscovery wait and failover in progress flags */ | 2238 | /* Clear pending FCF rediscovery wait flag */ |
2238 | phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | | 2239 | phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; |
2239 | FCF_DEAD_DISC | | 2240 | |
2240 | FCF_ACVL_DISC); | ||
2241 | /* Now, try to stop the timer */ | 2241 | /* Now, try to stop the timer */ |
2242 | del_timer(&phba->fcf.redisc_wait); | 2242 | del_timer(&phba->fcf.redisc_wait); |
2243 | } | 2243 | } |
@@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) | |||
2261 | return; | 2261 | return; |
2262 | } | 2262 | } |
2263 | __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); | 2263 | __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); |
2264 | /* Clear failover in progress flags */ | ||
2265 | phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); | ||
2264 | spin_unlock_irq(&phba->hbalock); | 2266 | spin_unlock_irq(&phba->hbalock); |
2265 | } | 2267 | } |
2266 | 2268 | ||
@@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) | |||
2935 | phba->fcf.fcf_flag |= FCF_REDISC_EVT; | 2937 | phba->fcf.fcf_flag |= FCF_REDISC_EVT; |
2936 | spin_unlock_irq(&phba->hbalock); | 2938 | spin_unlock_irq(&phba->hbalock); |
2937 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2939 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2938 | "2776 FCF rediscover wait timer expired, post " | 2940 | "2776 FCF rediscover quiescent timer expired\n"); |
2939 | "a worker thread event for FCF table scan\n"); | ||
2940 | /* wake up worker thread */ | 2941 | /* wake up worker thread */ |
2941 | lpfc_worker_wake_up(phba); | 2942 | lpfc_worker_wake_up(phba); |
2942 | } | 2943 | } |
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3311 | if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) | 3312 | if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) |
3312 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3313 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
3313 | LOG_DISCOVERY, | 3314 | LOG_DISCOVERY, |
3314 | "2546 New FCF found event: " | 3315 | "2546 New FCF event, evt_tag:x%x, " |
3315 | "evt_tag:x%x, fcf_index:x%x\n", | 3316 | "index:x%x\n", |
3316 | acqe_fcoe->event_tag, | 3317 | acqe_fcoe->event_tag, |
3317 | acqe_fcoe->index); | 3318 | acqe_fcoe->index); |
3318 | else | 3319 | else |
3319 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | | 3320 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | |
3320 | LOG_DISCOVERY, | 3321 | LOG_DISCOVERY, |
3321 | "2788 FCF parameter modified event: " | 3322 | "2788 FCF param modified event, " |
3322 | "evt_tag:x%x, fcf_index:x%x\n", | 3323 | "evt_tag:x%x, index:x%x\n", |
3323 | acqe_fcoe->event_tag, | 3324 | acqe_fcoe->event_tag, |
3324 | acqe_fcoe->index); | 3325 | acqe_fcoe->index); |
3325 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { | 3326 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { |
3326 | /* | 3327 | /* |
3327 | * During period of FCF discovery, read the FCF | 3328 | * During period of FCF discovery, read the FCF |
3328 | * table record indexed by the event to update | 3329 | * table record indexed by the event to update |
3329 | * FCF round robin failover eligible FCF bmask. | 3330 | * FCF roundrobin failover eligible FCF bmask. |
3330 | */ | 3331 | */ |
3331 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | | 3332 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | |
3332 | LOG_DISCOVERY, | 3333 | LOG_DISCOVERY, |
3333 | "2779 Read new FCF record with " | 3334 | "2779 Read FCF (x%x) for updating " |
3334 | "fcf_index:x%x for updating FCF " | 3335 | "roundrobin FCF failover bmask\n", |
3335 | "round robin failover bmask\n", | ||
3336 | acqe_fcoe->index); | 3336 | acqe_fcoe->index); |
3337 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); | 3337 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); |
3338 | } | 3338 | } |
3339 | 3339 | ||
3340 | /* If the FCF discovery is in progress, do nothing. */ | 3340 | /* If the FCF discovery is in progress, do nothing. */ |
3341 | spin_lock_irq(&phba->hbalock); | 3341 | spin_lock_irq(&phba->hbalock); |
3342 | if (phba->hba_flag & FCF_DISC_INPROGRESS) { | 3342 | if (phba->hba_flag & FCF_TS_INPROG) { |
3343 | spin_unlock_irq(&phba->hbalock); | 3343 | spin_unlock_irq(&phba->hbalock); |
3344 | break; | 3344 | break; |
3345 | } | 3345 | } |
@@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3358 | 3358 | ||
3359 | /* Otherwise, scan the entire FCF table and re-discover SAN */ | 3359 | /* Otherwise, scan the entire FCF table and re-discover SAN */ |
3360 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3360 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3361 | "2770 Start FCF table scan due to new FCF " | 3361 | "2770 Start FCF table scan per async FCF " |
3362 | "event: evt_tag:x%x, fcf_index:x%x\n", | 3362 | "event, evt_tag:x%x, index:x%x\n", |
3363 | acqe_fcoe->event_tag, acqe_fcoe->index); | 3363 | acqe_fcoe->event_tag, acqe_fcoe->index); |
3364 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, | 3364 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
3365 | LPFC_FCOE_FCF_GET_FIRST); | 3365 | LPFC_FCOE_FCF_GET_FIRST); |
3366 | if (rc) | 3366 | if (rc) |
3367 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3367 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3368 | "2547 Issue FCF scan read FCF mailbox " | 3368 | "2547 Issue FCF scan read FCF mailbox " |
3369 | "command failed 0x%x\n", rc); | 3369 | "command failed (x%x)\n", rc); |
3370 | break; | 3370 | break; |
3371 | 3371 | ||
3372 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: | 3372 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: |
@@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3378 | 3378 | ||
3379 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: | 3379 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: |
3380 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3380 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3381 | "2549 FCF disconnected from network index 0x%x" | 3381 | "2549 FCF (x%x) disconnected from network, " |
3382 | " tag 0x%x\n", acqe_fcoe->index, | 3382 | "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); |
3383 | acqe_fcoe->event_tag); | ||
3384 | /* | 3383 | /* |
3385 | * If we are in the middle of FCF failover process, clear | 3384 | * If we are in the middle of FCF failover process, clear |
3386 | * the corresponding FCF bit in the roundrobin bitmap. | 3385 | * the corresponding FCF bit in the roundrobin bitmap. |
@@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3494 | spin_unlock_irq(&phba->hbalock); | 3493 | spin_unlock_irq(&phba->hbalock); |
3495 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | | 3494 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | |
3496 | LOG_DISCOVERY, | 3495 | LOG_DISCOVERY, |
3497 | "2773 Start FCF fast failover due " | 3496 | "2773 Start FCF failover per CVL, " |
3498 | "to CVL event: evt_tag:x%x\n", | 3497 | "evt_tag:x%x\n", acqe_fcoe->event_tag); |
3499 | acqe_fcoe->event_tag); | ||
3500 | rc = lpfc_sli4_redisc_fcf_table(phba); | 3498 | rc = lpfc_sli4_redisc_fcf_table(phba); |
3501 | if (rc) { | 3499 | if (rc) { |
3502 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3500 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
@@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) | |||
3646 | 3644 | ||
3647 | /* Scan FCF table from the first entry to re-discover SAN */ | 3645 | /* Scan FCF table from the first entry to re-discover SAN */ |
3648 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3646 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3649 | "2777 Start FCF table scan after FCF " | 3647 | "2777 Start post-quiescent FCF table scan\n"); |
3650 | "rediscovery quiescent period over\n"); | ||
3651 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); | 3648 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
3652 | if (rc) | 3649 | if (rc) |
3653 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3650 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
@@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4165 | goto out_free_active_sgl; | 4162 | goto out_free_active_sgl; |
4166 | } | 4163 | } |
4167 | 4164 | ||
4168 | /* Allocate eligible FCF bmask memory for FCF round robin failover */ | 4165 | /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ |
4169 | longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; | 4166 | longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; |
4170 | phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), | 4167 | phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), |
4171 | GFP_KERNEL); | 4168 | GFP_KERNEL); |
@@ -7271,6 +7268,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba) | |||
7271 | } | 7268 | } |
7272 | 7269 | ||
7273 | /** | 7270 | /** |
7271 | * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy | ||
7272 | * @phba: Pointer to HBA context object. | ||
7273 | * | ||
7274 | * This function is called in the SLI4 code path to wait for completion | ||
7275 | * of the device's XRI exchange busy conditions. It will check the XRI exchange busy | ||
7276 | * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after | ||
7277 | * that, it will check the XRI exchange busy on outstanding FCP and ELS | ||
7278 | * I/Os every 30 seconds, log an error message, and wait forever. Only when | ||
7279 | * all XRI exchange busy conditions have completed does the driver unload proceed | ||
7280 | * with invoking the function reset ioctl mailbox command to the CNA and | ||
7281 | * the rest of the driver unload resource release. | ||
7282 | **/ | ||
7283 | static void | ||
7284 | lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) | ||
7285 | { | ||
7286 | int wait_time = 0; | ||
7287 | int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); | ||
7288 | int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); | ||
7289 | |||
7290 | while (!fcp_xri_cmpl || !els_xri_cmpl) { | ||
7291 | if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { | ||
7292 | if (!fcp_xri_cmpl) | ||
7293 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7294 | "2877 FCP XRI exchange busy " | ||
7295 | "wait time: %d seconds.\n", | ||
7296 | wait_time/1000); | ||
7297 | if (!els_xri_cmpl) | ||
7298 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7299 | "2878 ELS XRI exchange busy " | ||
7300 | "wait time: %d seconds.\n", | ||
7301 | wait_time/1000); | ||
7302 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); | ||
7303 | wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; | ||
7304 | } else { | ||
7305 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); | ||
7306 | wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; | ||
7307 | } | ||
7308 | fcp_xri_cmpl = | ||
7309 | list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); | ||
7310 | els_xri_cmpl = | ||
7311 | list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); | ||
7312 | } | ||
7313 | } | ||
7314 | |||
7315 | /** | ||
7274 | * lpfc_sli4_hba_unset - Unset the fcoe hba | 7316 | * lpfc_sli4_hba_unset - Unset the fcoe hba |
7275 | * @phba: Pointer to HBA context object. | 7317 | * @phba: Pointer to HBA context object. |
7276 | * | 7318 | * |
@@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) | |||
7315 | spin_unlock_irq(&phba->hbalock); | 7357 | spin_unlock_irq(&phba->hbalock); |
7316 | } | 7358 | } |
7317 | 7359 | ||
7360 | /* Abort all iocbs associated with the hba */ | ||
7361 | lpfc_sli_hba_iocb_abort(phba); | ||
7362 | |||
7363 | /* Wait for completion of device XRI exchange busy */ | ||
7364 | lpfc_sli4_xri_exchange_busy_wait(phba); | ||
7365 | |||
7318 | /* Disable PCI subsystem interrupt */ | 7366 | /* Disable PCI subsystem interrupt */ |
7319 | lpfc_sli4_disable_intr(phba); | 7367 | lpfc_sli4_disable_intr(phba); |
7320 | 7368 | ||
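lpfc_sli4_xri_exchange_busy_wait() above polls the aborted SCSI and ELS XRI lists on the cadence its kernel-doc describes: every 10 ms for the first 10 seconds, then every 30 seconds with an error log, indefinitely. The LPFC_XRI_EXCH_BUSY_WAIT_* macros it references are defined in lpfc_sli4.h (also touched by this commit but not shown here); values consistent with that description would be, for example:

	/* Assumed values only, in milliseconds -- see lpfc_sli4.h in this
	 * commit for the authoritative definitions.
	 */
	#define LPFC_XRI_EXCH_BUSY_WAIT_TMO	10000	/* fast-poll phase length */
	#define LPFC_XRI_EXCH_BUSY_WAIT_T1	10	/* fast poll interval */
	#define LPFC_XRI_EXCH_BUSY_WAIT_T2	30000	/* slow poll interval */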
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 0dfa310cd609..62d0957e1d4c 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -797,6 +797,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, | |||
797 | } | 797 | } |
798 | 798 | ||
799 | /** | 799 | /** |
800 | * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. | ||
801 | * @vport: pointer to a vport object. | ||
802 | * | ||
803 | * This routine sends a mailbox command to unregister all active RPIs for | ||
804 | * a vport. | ||
805 | **/ | ||
806 | void | ||
807 | lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) | ||
808 | { | ||
809 | struct lpfc_hba *phba = vport->phba; | ||
810 | LPFC_MBOXQ_t *mbox; | ||
811 | int rc; | ||
812 | |||
813 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
814 | if (mbox) { | ||
815 | lpfc_unreg_login(phba, vport->vpi, | ||
816 | vport->vpi + phba->vpi_base, mbox); | ||
817 | mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; | ||
818 | mbox->vport = vport; | ||
819 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
820 | mbox->context1 = NULL; | ||
821 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | ||
822 | if (rc == MBX_NOT_FINISHED) | ||
823 | mempool_free(mbox, phba->mbox_mem_pool); | ||
824 | } | ||
825 | } | ||
826 | |||
827 | /** | ||
800 | * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier | 828 | * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier |
801 | * @phba: pointer to lpfc hba data structure. | 829 | * @phba: pointer to lpfc hba data structure. |
802 | * @vpi: virtual N_Port identifier. | 830 | * @vpi: virtual N_Port identifier. |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3a658953486c..581837b3c71a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
169 | spin_lock_irqsave(shost->host_lock, flags); | 169 | spin_lock_irqsave(shost->host_lock, flags); |
170 | if (!vport->stat_data_enabled || | 170 | if (!vport->stat_data_enabled || |
171 | vport->stat_data_blocked || | 171 | vport->stat_data_blocked || |
172 | !pnode || | ||
172 | !pnode->lat_data || | 173 | !pnode->lat_data || |
173 | (phba->bucket_type == LPFC_NO_BUCKET)) { | 174 | (phba->bucket_type == LPFC_NO_BUCKET)) { |
174 | spin_unlock_irqrestore(shost->host_lock, flags); | 175 | spin_unlock_irqrestore(shost->host_lock, flags); |
@@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, | |||
2040 | struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; | 2041 | struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; |
2041 | unsigned long flags; | 2042 | unsigned long flags; |
2042 | 2043 | ||
2044 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) | ||
2045 | return; | ||
2046 | |||
2043 | /* If there is queuefull or busy condition send a scsi event */ | 2047 | /* If there is queuefull or busy condition send a scsi event */ |
2044 | if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || | 2048 | if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || |
2045 | (cmnd->result == SAM_STAT_BUSY)) { | 2049 | (cmnd->result == SAM_STAT_BUSY)) { |
@@ -2895,7 +2899,7 @@ void lpfc_poll_timeout(unsigned long ptr) | |||
2895 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. | 2899 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. |
2896 | **/ | 2900 | **/ |
2897 | static int | 2901 | static int |
2898 | lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 2902 | lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
2899 | { | 2903 | { |
2900 | struct Scsi_Host *shost = cmnd->device->host; | 2904 | struct Scsi_Host *shost = cmnd->device->host; |
2901 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2905 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
@@ -3056,6 +3060,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
3056 | return 0; | 3060 | return 0; |
3057 | } | 3061 | } |
3058 | 3062 | ||
3063 | static DEF_SCSI_QCMD(lpfc_queuecommand) | ||
3064 | |||
3059 | /** | 3065 | /** |
3060 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point | 3066 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
3061 | * @cmnd: Pointer to scsi_cmnd data structure. | 3067 | * @cmnd: Pointer to scsi_cmnd data structure. |
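The rename to lpfc_queuecommand_lck() plus the static DEF_SCSI_QCMD(lpfc_queuecommand) line tracks the SCSI midlayer change that moved host_lock handling out of individual drivers: the macro generates the queuecommand entry point, which takes the host lock and then calls the _lck variant. The sketch below shows the assumed, simplified shape of that generated wrapper; it is not a copy of the kernel macro's exact expansion.

/* Assumed, simplified expansion of DEF_SCSI_QCMD(lpfc_queuecommand). */
static int lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(shost->host_lock, flags);
        rc = lpfc_queuecommand_lck(cmnd, cmnd->scsi_done); /* locked variant above */
        spin_unlock_irqrestore(shost->host_lock, flags);
        return rc;
}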
@@ -3226,10 +3232,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
3226 | struct lpfc_scsi_buf *lpfc_cmd; | 3232 | struct lpfc_scsi_buf *lpfc_cmd; |
3227 | struct lpfc_iocbq *iocbq; | 3233 | struct lpfc_iocbq *iocbq; |
3228 | struct lpfc_iocbq *iocbqrsp; | 3234 | struct lpfc_iocbq *iocbqrsp; |
3235 | struct lpfc_nodelist *pnode = rdata->pnode; | ||
3229 | int ret; | 3236 | int ret; |
3230 | int status; | 3237 | int status; |
3231 | 3238 | ||
3232 | if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) | 3239 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
3233 | return FAILED; | 3240 | return FAILED; |
3234 | 3241 | ||
3235 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 3242 | lpfc_cmd = lpfc_get_scsi_buf(phba); |
@@ -3256,7 +3263,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
3256 | "0702 Issue %s to TGT %d LUN %d " | 3263 | "0702 Issue %s to TGT %d LUN %d " |
3257 | "rpi x%x nlp_flag x%x\n", | 3264 | "rpi x%x nlp_flag x%x\n", |
3258 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, | 3265 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, |
3259 | rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); | 3266 | pnode->nlp_rpi, pnode->nlp_flag); |
3260 | 3267 | ||
3261 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, | 3268 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
3262 | iocbq, iocbqrsp, lpfc_cmd->timeout); | 3269 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c5614cfcc6e9..22f17087883c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) | |||
95 | return -ENOMEM; | 95 | return -ENOMEM; |
96 | /* set consumption flag every once in a while */ | 96 | /* set consumption flag every once in a while */ |
97 | if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) | 97 | if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) |
98 | bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); | 98 | bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); |
99 | 99 | ||
100 | lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); | 100 | lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); |
101 | 101 | ||
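The lpfc_sli4_wq_put() hunk only renames the bit-field accessor; the behavior is unchanged: every LPFC_RELEASE_NOTIFICATION_INTERVAL entries the driver sets the WQEC bit so the port batches its consumption notifications rather than reporting per entry. Reduced to that cadence check (struct demo_wq and its fields are hypothetical):

/* Hypothetical sketch of the every-Nth-entry notification request. */
#define NOTIFY_INTERVAL 32              /* mirrors LPFC_RELEASE_NOTIFICATION_INTERVAL */

struct demo_wq {
        uint32_t host_index;
        uint32_t entry_count;
};

static void post_entry(struct demo_wq *q, union lpfc_wqe *wqe)
{
        if (!((q->host_index + 1) % NOTIFY_INTERVAL))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); /* ask for a release report */

        /* ... copy the WQE into the ring ... */
        q->host_index = (q->host_index + 1) % q->entry_count;
}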
@@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1735 | struct lpfc_vport *vport = pmb->vport; | 1735 | struct lpfc_vport *vport = pmb->vport; |
1736 | struct lpfc_dmabuf *mp; | 1736 | struct lpfc_dmabuf *mp; |
1737 | struct lpfc_nodelist *ndlp; | 1737 | struct lpfc_nodelist *ndlp; |
1738 | struct Scsi_Host *shost; | ||
1738 | uint16_t rpi, vpi; | 1739 | uint16_t rpi, vpi; |
1739 | int rc; | 1740 | int rc; |
1740 | 1741 | ||
@@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1746 | } | 1747 | } |
1747 | 1748 | ||
1748 | if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && | 1749 | if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && |
1749 | (phba->sli_rev == LPFC_SLI_REV4)) | 1750 | (phba->sli_rev == LPFC_SLI_REV4) && |
1751 | (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) | ||
1750 | lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); | 1752 | lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); |
1751 | 1753 | ||
1752 | /* | 1754 | /* |
@@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1765 | return; | 1767 | return; |
1766 | } | 1768 | } |
1767 | 1769 | ||
1768 | /* Unreg VPI, if the REG_VPI succeed after VLink failure */ | ||
1769 | if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && | 1770 | if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && |
1770 | !(phba->pport->load_flag & FC_UNLOADING) && | 1771 | !(phba->pport->load_flag & FC_UNLOADING) && |
1771 | !pmb->u.mb.mbxStatus) { | 1772 | !pmb->u.mb.mbxStatus) { |
1772 | lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); | 1773 | shost = lpfc_shost_from_vport(vport); |
1773 | pmb->vport = vport; | 1774 | spin_lock_irq(shost->host_lock); |
1774 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 1775 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
1775 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | 1776 | vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; |
1776 | if (rc != MBX_NOT_FINISHED) | 1777 | spin_unlock_irq(shost->host_lock); |
1777 | return; | ||
1778 | } | 1778 | } |
1779 | 1779 | ||
1780 | if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { | 1780 | if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
5921 | * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution | 5921 | * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution |
5922 | * @phba: Pointer to HBA context object. | 5922 | * @phba: Pointer to HBA context object. |
5923 | * | 5923 | * |
5924 | * This routine performs a round robin SCSI command to SLI4 FCP WQ index | 5924 | * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index |
5925 | * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock | 5925 | * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock |
5926 | * held. | 5926 | * held. |
5927 | * | 5927 | * |
@@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5965 | uint16_t abrt_iotag; | 5965 | uint16_t abrt_iotag; |
5966 | struct lpfc_iocbq *abrtiocbq; | 5966 | struct lpfc_iocbq *abrtiocbq; |
5967 | struct ulp_bde64 *bpl = NULL; | 5967 | struct ulp_bde64 *bpl = NULL; |
5968 | uint32_t els_id = ELS_ID_DEFAULT; | 5968 | uint32_t els_id = LPFC_ELS_ID_DEFAULT; |
5969 | int numBdes, i; | 5969 | int numBdes, i; |
5970 | struct ulp_bde64 bde; | 5970 | struct ulp_bde64 bde; |
5971 | 5971 | ||
@@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5982 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); | 5982 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
5983 | abort_tag = (uint32_t) iocbq->iotag; | 5983 | abort_tag = (uint32_t) iocbq->iotag; |
5984 | xritag = iocbq->sli4_xritag; | 5984 | xritag = iocbq->sli4_xritag; |
5985 | wqe->words[7] = 0; /* The ct field has moved so reset */ | 5985 | wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ |
5986 | /* words0-2 bpl convert bde */ | 5986 | /* words0-2 bpl convert bde */ |
5987 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { | 5987 | if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { |
5988 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | 5988 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / |
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6033 | * contains the FCFI and remote N_Port_ID is | 6033 | * contains the FCFI and remote N_Port_ID is |
6034 | * in word 5. | 6034 | * in word 5. |
6035 | */ | 6035 | */ |
6036 | |||
6037 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); | 6036 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); |
6038 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | 6037 | bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, |
6039 | iocbq->iocb.ulpContext); | 6038 | iocbq->iocb.ulpContext); |
6040 | 6039 | bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); | |
6041 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); | 6040 | bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); |
6042 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | ||
6043 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ | 6041 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ |
6044 | |||
6045 | if (command_type == ELS_COMMAND_FIP) { | 6042 | if (command_type == ELS_COMMAND_FIP) { |
6046 | els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) | 6043 | els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) |
6047 | >> LPFC_FIP_ELS_ID_SHIFT); | 6044 | >> LPFC_FIP_ELS_ID_SHIFT); |
6048 | } | 6045 | } |
6049 | bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); | 6046 | bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); |
6050 | 6047 | bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); | |
6048 | bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); | ||
6049 | bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); | ||
6050 | bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); | ||
6051 | bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); | ||
6051 | break; | 6052 | break; |
6052 | case CMD_XMIT_SEQUENCE64_CX: | 6053 | case CMD_XMIT_SEQUENCE64_CX: |
6053 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | 6054 | bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, |
6054 | iocbq->iocb.un.ulpWord[3]); | 6055 | iocbq->iocb.un.ulpWord[3]); |
6055 | wqe->generic.word3 = 0; | 6056 | bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, |
6056 | bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); | 6057 | iocbq->iocb.ulpContext); |
6057 | /* The entire sequence is transmitted for this IOCB */ | 6058 | /* The entire sequence is transmitted for this IOCB */ |
6058 | xmit_len = total_len; | 6059 | xmit_len = total_len; |
6059 | cmnd = CMD_XMIT_SEQUENCE64_CR; | 6060 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
6060 | case CMD_XMIT_SEQUENCE64_CR: | 6061 | case CMD_XMIT_SEQUENCE64_CR: |
6061 | /* word3 iocb=io_tag32 wqe=payload_offset */ | 6062 | /* word3 iocb=io_tag32 wqe=reserved */ |
6062 | /* payload offset used for multilpe outstanding | 6063 | wqe->xmit_sequence.rsvd3 = 0; |
6063 | * sequences on the same exchange | ||
6064 | */ | ||
6065 | wqe->words[3] = 0; | ||
6066 | /* word4 relative_offset memcpy */ | 6064 | /* word4 relative_offset memcpy */ |
6067 | /* word5 r_ctl/df_ctl memcpy */ | 6065 | /* word5 r_ctl/df_ctl memcpy */ |
6068 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 6066 | bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); |
6067 | bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); | ||
6068 | bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, | ||
6069 | LPFC_WQE_IOD_WRITE); | ||
6070 | bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, | ||
6071 | LPFC_WQE_LENLOC_WORD12); | ||
6072 | bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); | ||
6069 | wqe->xmit_sequence.xmit_len = xmit_len; | 6073 | wqe->xmit_sequence.xmit_len = xmit_len; |
6070 | command_type = OTHER_COMMAND; | 6074 | command_type = OTHER_COMMAND; |
6071 | break; | 6075 | break; |
6072 | case CMD_XMIT_BCAST64_CN: | 6076 | case CMD_XMIT_BCAST64_CN: |
6073 | /* word3 iocb=iotag32 wqe=payload_len */ | 6077 | /* word3 iocb=iotag32 wqe=seq_payload_len */ |
6074 | wqe->words[3] = 0; /* no definition for this in wqe */ | 6078 | wqe->xmit_bcast64.seq_payload_len = xmit_len; |
6075 | /* word4 iocb=rsvd wqe=rsvd */ | 6079 | /* word4 iocb=rsvd wqe=rsvd */ |
6076 | /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ | 6080 | /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ |
6077 | /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ | 6081 | /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ |
6078 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, | 6082 | bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, |
6079 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); | 6083 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); |
6084 | bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); | ||
6085 | bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); | ||
6086 | bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, | ||
6087 | LPFC_WQE_LENLOC_WORD3); | ||
6088 | bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); | ||
6080 | break; | 6089 | break; |
6081 | case CMD_FCP_IWRITE64_CR: | 6090 | case CMD_FCP_IWRITE64_CR: |
6082 | command_type = FCP_COMMAND_DATA_OUT; | 6091 | command_type = FCP_COMMAND_DATA_OUT; |
6083 | /* The struct for wqe fcp_iwrite has 3 fields that are somewhat | 6092 | /* word3 iocb=iotag wqe=payload_offset_len */ |
6084 | * confusing. | 6093 | /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ |
6085 | * word3 is payload_len: byte offset to the sgl entry for the | 6094 | wqe->fcp_iwrite.payload_offset_len = |
6086 | * fcp_command. | 6095 | xmit_len + sizeof(struct fcp_rsp); |
6087 | * word4 is total xfer len, same as the IOCB->ulpParameter. | 6096 | /* word4 iocb=parameter wqe=total_xfer_length memcpy */ |
6088 | * word5 is initial xfer len 0 = wait for xfer-ready | 6097 | /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ |
6089 | */ | 6098 | bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, |
6090 | 6099 | iocbq->iocb.ulpFCP2Rcvy); | |
6091 | /* Always wait for xfer-ready before sending data */ | 6100 | bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); |
6092 | wqe->fcp_iwrite.initial_xfer_len = 0; | 6101 | /* Always open the exchange */ |
6093 | /* word 4 (xfer length) should have been set on the memcpy */ | 6102 | bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); |
6094 | 6103 | bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); | |
6095 | /* allow write to fall through to read */ | 6104 | bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); |
6105 | bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, | ||
6106 | LPFC_WQE_LENLOC_WORD4); | ||
6107 | bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); | ||
6108 | bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); | ||
6109 | break; | ||
6096 | case CMD_FCP_IREAD64_CR: | 6110 | case CMD_FCP_IREAD64_CR: |
6097 | /* FCP_CMD is always the 1st sgl entry */ | 6111 | /* word3 iocb=iotag wqe=payload_offset_len */ |
6098 | wqe->fcp_iread.payload_len = | 6112 | /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ |
6113 | wqe->fcp_iread.payload_offset_len = | ||
6099 | xmit_len + sizeof(struct fcp_rsp); | 6114 | xmit_len + sizeof(struct fcp_rsp); |
6100 | 6115 | /* word4 iocb=parameter wqe=total_xfer_length memcpy */ | |
6101 | /* word 4 (xfer length) should have been set on the memcpy */ | 6116 | /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ |
6102 | 6117 | bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, | |
6103 | bf_set(lpfc_wqe_gen_erp, &wqe->generic, | 6118 | iocbq->iocb.ulpFCP2Rcvy); |
6104 | iocbq->iocb.ulpFCP2Rcvy); | 6119 | bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); |
6105 | bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); | ||
6106 | /* The XC bit and the XS bit are similar. The driver never | ||
6107 | * tracked whether or not the exchange was previouslly open. | ||
6108 | * XC = Exchange create, 0 is create. 1 is already open. | ||
6109 | * XS = link cmd: 1 do not close the exchange after command. | ||
6110 | * XS = 0 close exchange when command completes. | ||
6111 | * The only time we would not set the XC bit is when the XS bit | ||
6112 | * is set and we are sending our 2nd or greater command on | ||
6113 | * this exchange. | ||
6114 | */ | ||
6115 | /* Always open the exchange */ | 6120 | /* Always open the exchange */ |
6116 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | 6121 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); |
6117 | 6122 | bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); | |
6118 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ | 6123 | bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); |
6119 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | 6124 | bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, |
6120 | break; | 6125 | LPFC_WQE_LENLOC_WORD4); |
6126 | bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); | ||
6127 | bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); | ||
6128 | break; | ||
6121 | case CMD_FCP_ICMND64_CR: | 6129 | case CMD_FCP_ICMND64_CR: |
6130 | /* word3 iocb=IO_TAG wqe=reserved */ | ||
6131 | wqe->fcp_icmd.rsrvd3 = 0; | ||
6132 | bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); | ||
6122 | /* Always open the exchange */ | 6133 | /* Always open the exchange */ |
6123 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | 6134 | bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); |
6124 | 6135 | bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); | |
6125 | wqe->words[4] = 0; | 6136 | bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); |
6126 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ | 6137 | bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); |
6127 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 6138 | bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, |
6139 | LPFC_WQE_LENLOC_NONE); | ||
6140 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); | ||
6128 | break; | 6141 | break; |
6129 | case CMD_GEN_REQUEST64_CR: | 6142 | case CMD_GEN_REQUEST64_CR: |
6130 | /* word3 command length is described as byte offset to the | 6143 | /* word3 iocb=IO_TAG wqe=request_payload_len */ |
6131 | * rsp_data. Would always be 16, sizeof(struct sli4_sge) | 6144 | wqe->gen_req.request_payload_len = xmit_len; |
6132 | * sgl[0] = cmnd | 6145 | /* word4 iocb=parameter wqe=relative_offset memcpy */ |
6133 | * sgl[1] = rsp. | 6146 | /* word5 [rctl, type, df_ctl, la] copied in memcpy */ |
6134 | * | ||
6135 | */ | ||
6136 | wqe->gen_req.command_len = xmit_len; | ||
6137 | /* Word4 parameter copied in the memcpy */ | ||
6138 | /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ | ||
6139 | /* word6 context tag copied in memcpy */ | 6147 | /* word6 context tag copied in memcpy */ |
6140 | if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { | 6148 | if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { |
6141 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); | 6149 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); |
@@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6144 | ct, iocbq->iocb.ulpCommand); | 6152 | ct, iocbq->iocb.ulpCommand); |
6145 | return IOCB_ERROR; | 6153 | return IOCB_ERROR; |
6146 | } | 6154 | } |
6147 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); | 6155 | bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); |
6148 | bf_set(wqe_tmo, &wqe->gen_req.wqe_com, | 6156 | bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); |
6149 | iocbq->iocb.ulpTimeout); | 6157 | bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); |
6150 | 6158 | bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); | |
6151 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | 6159 | bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); |
6160 | bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); | ||
6161 | bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); | ||
6162 | bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); | ||
6152 | command_type = OTHER_COMMAND; | 6163 | command_type = OTHER_COMMAND; |
6153 | break; | 6164 | break; |
6154 | case CMD_XMIT_ELS_RSP64_CX: | 6165 | case CMD_XMIT_ELS_RSP64_CX: |
6155 | /* words0-2 BDE memcpy */ | 6166 | /* words0-2 BDE memcpy */ |
6156 | /* word3 iocb=iotag32 wqe=rsvd */ | 6167 | /* word3 iocb=iotag32 wqe=response_payload_len */ |
6157 | wqe->words[3] = 0; | 6168 | wqe->xmit_els_rsp.response_payload_len = xmit_len; |
6158 | /* word4 iocb=did wge=rsvd. */ | 6169 | /* word4 iocb=did wge=rsvd. */ |
6159 | wqe->words[4] = 0; | 6170 | wqe->xmit_els_rsp.rsvd4 = 0; |
6160 | /* word5 iocb=rsvd wge=did */ | 6171 | /* word5 iocb=rsvd wge=did */ |
6161 | bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, | 6172 | bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, |
6162 | iocbq->iocb.un.elsreq64.remoteID); | 6173 | iocbq->iocb.un.elsreq64.remoteID); |
6163 | 6174 | bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, | |
6164 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, | 6175 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); |
6165 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); | 6176 | bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); |
6166 | 6177 | bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, | |
6167 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | 6178 | iocbq->iocb.ulpContext); |
6168 | bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); | ||
6169 | if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) | 6179 | if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) |
6170 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | 6180 | bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, |
6171 | iocbq->vport->vpi + phba->vpi_base); | 6181 | iocbq->vport->vpi + phba->vpi_base); |
6182 | bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); | ||
6183 | bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); | ||
6184 | bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); | ||
6185 | bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, | ||
6186 | LPFC_WQE_LENLOC_WORD3); | ||
6187 | bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); | ||
6172 | command_type = OTHER_COMMAND; | 6188 | command_type = OTHER_COMMAND; |
6173 | break; | 6189 | break; |
6174 | case CMD_CLOSE_XRI_CN: | 6190 | case CMD_CLOSE_XRI_CN: |
@@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6193 | else | 6209 | else |
6194 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); | 6210 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); |
6195 | bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); | 6211 | bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); |
6196 | wqe->words[5] = 0; | 6212 | /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ |
6197 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, | 6213 | wqe->abort_cmd.rsrvd5 = 0; |
6214 | bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, | ||
6198 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); | 6215 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); |
6199 | abort_tag = iocbq->iocb.un.acxri.abortIoTag; | 6216 | abort_tag = iocbq->iocb.un.acxri.abortIoTag; |
6200 | /* | 6217 | /* |
6201 | * The abort handler will send us CMD_ABORT_XRI_CN or | 6218 | * The abort handler will send us CMD_ABORT_XRI_CN or |
6202 | * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX | 6219 | * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX |
6203 | */ | 6220 | */ |
6204 | bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); | 6221 | bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
6222 | bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); | ||
6223 | bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, | ||
6224 | LPFC_WQE_LENLOC_NONE); | ||
6205 | cmnd = CMD_ABORT_XRI_CX; | 6225 | cmnd = CMD_ABORT_XRI_CX; |
6206 | command_type = OTHER_COMMAND; | 6226 | command_type = OTHER_COMMAND; |
6207 | xritag = 0; | 6227 | xritag = 0; |
@@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6235 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); | 6255 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); |
6236 | bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, | 6256 | bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, |
6237 | iocbq->iocb.ulpContext); | 6257 | iocbq->iocb.ulpContext); |
6258 | bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); | ||
6259 | bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, | ||
6260 | LPFC_WQE_LENLOC_NONE); | ||
6238 | /* Overwrite the pre-set comnd type with OTHER_COMMAND */ | 6261 | /* Overwrite the pre-set comnd type with OTHER_COMMAND */ |
6239 | command_type = OTHER_COMMAND; | 6262 | command_type = OTHER_COMMAND; |
6240 | break; | 6263 | break; |
6241 | case CMD_XRI_ABORTED_CX: | 6264 | case CMD_XRI_ABORTED_CX: |
6242 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 6265 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
6243 | /* words0-2 are all 0's no bde */ | ||
6244 | /* word3 and word4 are rsvrd */ | ||
6245 | wqe->words[3] = 0; | ||
6246 | wqe->words[4] = 0; | ||
6247 | /* word5 iocb=rsvd wge=did */ | ||
6248 | /* There is no remote port id in the IOCB? */ | ||
6249 | /* Let this fall through and fail */ | ||
6250 | case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ | 6266 | case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ |
6251 | case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ | 6267 | case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ |
6252 | case CMD_FCP_TRSP64_CX: /* Target mode rcv */ | 6268 | case CMD_FCP_TRSP64_CX: /* Target mode rcv */ |
@@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6257 | iocbq->iocb.ulpCommand); | 6273 | iocbq->iocb.ulpCommand); |
6258 | return IOCB_ERROR; | 6274 | return IOCB_ERROR; |
6259 | break; | 6275 | break; |
6260 | |||
6261 | } | 6276 | } |
6262 | bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); | 6277 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); |
6263 | bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); | 6278 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); |
6264 | wqe->generic.abort_tag = abort_tag; | 6279 | wqe->generic.wqe_com.abort_tag = abort_tag; |
6265 | bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); | 6280 | bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); |
6266 | bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); | 6281 | bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); |
6267 | bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); | 6282 | bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); |
6268 | bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); | 6283 | bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
6269 | |||
6270 | return 0; | 6284 | return 0; |
6271 | } | 6285 | } |
6272 | 6286 | ||
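Most of the churn in lpfc_sli4_iocb2wqe() above is mechanical: raw word writes and the generic lpfc_wqe_gen_* accessors are replaced by per-command wqe_com bit fields (wqe_ct, wqe_pu, wqe_dbde, wqe_iod, wqe_lenloc, wqe_ebde_cnt, ...), all manipulated through the driver's bf_set() helper, which packs a value into a named bit range of a 32-bit word. The macro below is an assumed, simplified re-sketch of that style of accessor; the real definitions live in lpfc_hw4.h and may differ in detail.

/* Assumed shift/mask accessor in the spirit of bf_set() (illustrative names). */
struct demo_wqe_com {
        uint32_t word7;
};

#define demo_ct_SHIFT   2                       /* hypothetical 2-bit field at bits 3:2 */
#define demo_ct_MASK    0x00000003
#define demo_ct_WORD    word7

#define demo_bf_set(name, ptr, value)                                  \
        ((ptr)->name##_WORD = (((ptr)->name##_WORD &                   \
                ~(name##_MASK << name##_SHIFT)) |                      \
                (((value) & name##_MASK) << name##_SHIFT)))

/* usage: demo_bf_set(demo_ct, &wqe_com, ct) packs ct into bits 3:2 of word7 */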
@@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
7257 | } | 7271 | } |
7258 | 7272 | ||
7259 | /** | 7273 | /** |
7260 | * lpfc_sli_issue_abort_iotag - Abort function for a command iocb | 7274 | * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb |
7261 | * @phba: Pointer to HBA context object. | 7275 | * @phba: Pointer to HBA context object. |
7262 | * @pring: Pointer to driver SLI ring object. | 7276 | * @pring: Pointer to driver SLI ring object. |
7263 | * @cmdiocb: Pointer to driver command iocb object. | 7277 | * @cmdiocb: Pointer to driver command iocb object. |
7264 | * | 7278 | * |
7265 | * This function issues an abort iocb for the provided command | 7279 | * This function issues an abort iocb for the provided command iocb down to |
7266 | * iocb. This function is called with hbalock held. | 7280 | * the port. Other than the case the outstanding command iocb is an abort |
7267 | * The function returns 0 when it fails due to memory allocation | 7281 | * request, this function issues abort out unconditionally. This function is |
7268 | * failure or when the command iocb is an abort request. | 7282 | * called with hbalock held. The function returns 0 when it fails due to |
7283 | * memory allocation failure or when the command iocb is an abort request. | ||
7269 | **/ | 7284 | **/ |
7270 | int | 7285 | static int |
7271 | lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 7286 | lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
7272 | struct lpfc_iocbq *cmdiocb) | 7287 | struct lpfc_iocbq *cmdiocb) |
7273 | { | 7288 | { |
7274 | struct lpfc_vport *vport = cmdiocb->vport; | 7289 | struct lpfc_vport *vport = cmdiocb->vport; |
7275 | struct lpfc_iocbq *abtsiocbp; | 7290 | struct lpfc_iocbq *abtsiocbp; |
7276 | IOCB_t *icmd = NULL; | 7291 | IOCB_t *icmd = NULL; |
7277 | IOCB_t *iabt = NULL; | 7292 | IOCB_t *iabt = NULL; |
7278 | int retval = IOCB_ERROR; | 7293 | int retval; |
7279 | 7294 | ||
7280 | /* | 7295 | /* |
7281 | * There are certain command types we don't want to abort. And we | 7296 | * There are certain command types we don't want to abort. And we |
@@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7288 | (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) | 7303 | (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) |
7289 | return 0; | 7304 | return 0; |
7290 | 7305 | ||
7291 | /* If we're unloading, don't abort iocb on the ELS ring, but change the | ||
7292 | * callback so that nothing happens when it finishes. | ||
7293 | */ | ||
7294 | if ((vport->load_flag & FC_UNLOADING) && | ||
7295 | (pring->ringno == LPFC_ELS_RING)) { | ||
7296 | if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) | ||
7297 | cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; | ||
7298 | else | ||
7299 | cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; | ||
7300 | goto abort_iotag_exit; | ||
7301 | } | ||
7302 | |||
7303 | /* issue ABTS for this IOCB based on iotag */ | 7306 | /* issue ABTS for this IOCB based on iotag */ |
7304 | abtsiocbp = __lpfc_sli_get_iocbq(phba); | 7307 | abtsiocbp = __lpfc_sli_get_iocbq(phba); |
7305 | if (abtsiocbp == NULL) | 7308 | if (abtsiocbp == NULL) |
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7344 | 7347 | ||
7345 | if (retval) | 7348 | if (retval) |
7346 | __lpfc_sli_release_iocbq(phba, abtsiocbp); | 7349 | __lpfc_sli_release_iocbq(phba, abtsiocbp); |
7350 | |||
7351 | /* | ||
7352 | * Caller to this routine should check for IOCB_ERROR | ||
7353 | * and handle it properly. This routine no longer removes | ||
7354 | * iocb off txcmplq and call compl in case of IOCB_ERROR. | ||
7355 | */ | ||
7356 | return retval; | ||
7357 | } | ||
7358 | |||
7359 | /** | ||
7360 | * lpfc_sli_issue_abort_iotag - Abort function for a command iocb | ||
7361 | * @phba: Pointer to HBA context object. | ||
7362 | * @pring: Pointer to driver SLI ring object. | ||
7363 | * @cmdiocb: Pointer to driver command iocb object. | ||
7364 | * | ||
7365 | * This function issues an abort iocb for the provided command iocb. In case | ||
7366 | * of unloading, the abort iocb will not be issued to commands on the ELS | ||
7367 | * ring. Instead, the callback function shall be changed to those commands | ||
7368 | * so that nothing happens when them finishes. This function is called with | ||
7369 | * hbalock held. The function returns 0 when the command iocb is an abort | ||
7370 | * request. | ||
7371 | **/ | ||
7372 | int | ||
7373 | lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | ||
7374 | struct lpfc_iocbq *cmdiocb) | ||
7375 | { | ||
7376 | struct lpfc_vport *vport = cmdiocb->vport; | ||
7377 | int retval = IOCB_ERROR; | ||
7378 | IOCB_t *icmd = NULL; | ||
7379 | |||
7380 | /* | ||
7381 | * There are certain command types we don't want to abort. And we | ||
7382 | * don't want to abort commands that are already in the process of | ||
7383 | * being aborted. | ||
7384 | */ | ||
7385 | icmd = &cmdiocb->iocb; | ||
7386 | if (icmd->ulpCommand == CMD_ABORT_XRI_CN || | ||
7387 | icmd->ulpCommand == CMD_CLOSE_XRI_CN || | ||
7388 | (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) | ||
7389 | return 0; | ||
7390 | |||
7391 | /* | ||
7392 | * If we're unloading, don't abort iocb on the ELS ring, but change | ||
7393 | * the callback so that nothing happens when it finishes. | ||
7394 | */ | ||
7395 | if ((vport->load_flag & FC_UNLOADING) && | ||
7396 | (pring->ringno == LPFC_ELS_RING)) { | ||
7397 | if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) | ||
7398 | cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; | ||
7399 | else | ||
7400 | cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; | ||
7401 | goto abort_iotag_exit; | ||
7402 | } | ||
7403 | |||
7404 | /* Now, we try to issue the abort to the cmdiocb out */ | ||
7405 | retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); | ||
7406 | |||
7347 | abort_iotag_exit: | 7407 | abort_iotag_exit: |
7348 | /* | 7408 | /* |
7349 | * Caller to this routine should check for IOCB_ERROR | 7409 | * Caller to this routine should check for IOCB_ERROR |
@@ -7354,6 +7414,62 @@ abort_iotag_exit: | |||
7354 | } | 7414 | } |
7355 | 7415 | ||
7356 | /** | 7416 | /** |
7417 | * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring | ||
7418 | * @phba: Pointer to HBA context object. | ||
7419 | * @pring: Pointer to driver SLI ring object. | ||
7420 | * | ||
7421 | * This function aborts all iocbs in the given ring and frees all the iocb | ||
7422 | * objects in txq. This function issues abort iocbs unconditionally for all | ||
7423 | * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed | ||
7424 | * to complete before the return of this function. The caller is not required | ||
7425 | * to hold any locks. | ||
7426 | **/ | ||
7427 | static void | ||
7428 | lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | ||
7429 | { | ||
7430 | LIST_HEAD(completions); | ||
7431 | struct lpfc_iocbq *iocb, *next_iocb; | ||
7432 | |||
7433 | if (pring->ringno == LPFC_ELS_RING) | ||
7434 | lpfc_fabric_abort_hba(phba); | ||
7435 | |||
7436 | spin_lock_irq(&phba->hbalock); | ||
7437 | |||
7438 | /* Take off all the iocbs on txq for cancelling */ | ||
7439 | list_splice_init(&pring->txq, &completions); | ||
7440 | pring->txq_cnt = 0; | ||
7441 | |||
7442 | /* Next issue ABTS for everything on the txcmplq */ | ||
7443 | list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) | ||
7444 | lpfc_sli_abort_iotag_issue(phba, pring, iocb); | ||
7445 | |||
7446 | spin_unlock_irq(&phba->hbalock); | ||
7447 | |||
7448 | /* Cancel all the IOCBs from the completions list */ | ||
7449 | lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, | ||
7450 | IOERR_SLI_ABORTED); | ||
7451 | } | ||
7452 | |||
7453 | /** | ||
7454 | * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. | ||
7455 | * @phba: pointer to lpfc HBA data structure. | ||
7456 | * | ||
7457 | * This routine will abort all pending and outstanding iocbs to an HBA. | ||
7458 | **/ | ||
7459 | void | ||
7460 | lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) | ||
7461 | { | ||
7462 | struct lpfc_sli *psli = &phba->sli; | ||
7463 | struct lpfc_sli_ring *pring; | ||
7464 | int i; | ||
7465 | |||
7466 | for (i = 0; i < psli->num_rings; i++) { | ||
7467 | pring = &psli->ring[i]; | ||
7468 | lpfc_sli_iocb_ring_abort(phba, pring); | ||
7469 | } | ||
7470 | } | ||
7471 | |||
7472 | /** | ||
7357 | * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN | 7473 | * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN |
7358 | * @iocbq: Pointer to driver iocb object. | 7474 | * @iocbq: Pointer to driver iocb object. |
7359 | * @vport: Pointer to driver virtual port object. | 7475 | * @vport: Pointer to driver virtual port object. |
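The new lpfc_sli_iocb_ring_abort()/lpfc_sli_hba_iocb_abort() pair above separates the per-ring work (cancel txq entries locally, issue ABTS for everything on txcmplq) from the loop over all rings. Because txcmplq iocbs are not guaranteed to have completed on return, callers have to treat this as "start the aborts", not "flush and wait". A hypothetical caller, purely illustrative and not part of this patch, might look like:

/* Illustrative error-recovery caller (hypothetical, not driver code). */
static void recover_port_sketch(struct lpfc_hba *phba)
{
        /* txq entries complete right away with LOCAL_REJECT/SLI_ABORTED;
         * txcmplq entries only get an ABTS issued here and finish later,
         * whenever the port (or a subsequent reset) completes them. */
        lpfc_sli_hba_iocb_abort(phba);

        /* ... wait out or force the remaining XRI completions, then reset ... */
}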
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12242 | /* Issue the mailbox command asynchronously */ | 12358 | /* Issue the mailbox command asynchronously */ |
12243 | mboxq->vport = phba->pport; | 12359 | mboxq->vport = phba->pport; |
12244 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; | 12360 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; |
12361 | |||
12362 | spin_lock_irq(&phba->hbalock); | ||
12363 | phba->hba_flag |= FCF_TS_INPROG; | ||
12364 | spin_unlock_irq(&phba->hbalock); | ||
12365 | |||
12245 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | 12366 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
12246 | if (rc == MBX_NOT_FINISHED) | 12367 | if (rc == MBX_NOT_FINISHED) |
12247 | error = -EIO; | 12368 | error = -EIO; |
12248 | else { | 12369 | else { |
12249 | spin_lock_irq(&phba->hbalock); | ||
12250 | phba->hba_flag |= FCF_DISC_INPROGRESS; | ||
12251 | spin_unlock_irq(&phba->hbalock); | ||
12252 | /* Reset eligible FCF count for new scan */ | 12370 | /* Reset eligible FCF count for new scan */ |
12253 | if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) | 12371 | if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) |
12254 | phba->fcf.eligible_fcf_cnt = 0; | 12372 | phba->fcf.eligible_fcf_cnt = 0; |
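Moving the hba_flag update ahead of lpfc_sli_issue_mbox() closes a small race: with the old ordering the asynchronous completion could run, and test the flag, before the issuing path had set it. The rule the hunk applies is to publish in-progress state before handing work to an asynchronous consumer and to roll it back if the hand-off fails; a condensed sketch of that ordering (the wrapper function is hypothetical):

/* Sketch: set the in-progress flag before issuing, clear it on failure. */
static int scan_fcf_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        int rc;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag |= FCF_TS_INPROG;        /* visible before the completion runs */
        spin_unlock_irq(&phba->hbalock);

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~FCF_TS_INPROG; /* issue failed, nothing in flight */
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        return 0;
}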
@@ -12258,21 +12376,21 @@ fail_fcf_scan: | |||
12258 | if (error) { | 12376 | if (error) { |
12259 | if (mboxq) | 12377 | if (mboxq) |
12260 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 12378 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
12261 | /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ | 12379 | /* FCF scan failed, clear FCF_TS_INPROG flag */ |
12262 | spin_lock_irq(&phba->hbalock); | 12380 | spin_lock_irq(&phba->hbalock); |
12263 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 12381 | phba->hba_flag &= ~FCF_TS_INPROG; |
12264 | spin_unlock_irq(&phba->hbalock); | 12382 | spin_unlock_irq(&phba->hbalock); |
12265 | } | 12383 | } |
12266 | return error; | 12384 | return error; |
12267 | } | 12385 | } |
12268 | 12386 | ||
12269 | /** | 12387 | /** |
12270 | * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. | 12388 | * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. |
12271 | * @phba: pointer to lpfc hba data structure. | 12389 | * @phba: pointer to lpfc hba data structure. |
12272 | * @fcf_index: FCF table entry offset. | 12390 | * @fcf_index: FCF table entry offset. |
12273 | * | 12391 | * |
12274 | * This routine is invoked to read an FCF record indicated by @fcf_index | 12392 | * This routine is invoked to read an FCF record indicated by @fcf_index |
12275 | * and to use it for FLOGI round robin FCF failover. | 12393 | * and to use it for FLOGI roundrobin FCF failover. |
12276 | * | 12394 | * |
12277 | * Return 0 if the mailbox command is submitted sucessfully, none 0 | 12395 | * Return 0 if the mailbox command is submitted sucessfully, none 0 |
12278 | * otherwise. | 12396 | * otherwise. |
@@ -12318,7 +12436,7 @@ fail_fcf_read: | |||
12318 | * @fcf_index: FCF table entry offset. | 12436 | * @fcf_index: FCF table entry offset. |
12319 | * | 12437 | * |
12320 | * This routine is invoked to read an FCF record indicated by @fcf_index to | 12438 | * This routine is invoked to read an FCF record indicated by @fcf_index to |
12321 | * determine whether it's eligible for FLOGI round robin failover list. | 12439 | * determine whether it's eligible for FLOGI roundrobin failover list. |
12322 | * | 12440 | * |
12323 | * Return 0 if the mailbox command is submitted sucessfully, none 0 | 12441 | * Return 0 if the mailbox command is submitted sucessfully, none 0 |
12324 | * otherwise. | 12442 | * otherwise. |
@@ -12364,7 +12482,7 @@ fail_fcf_read: | |||
12364 | * | 12482 | * |
12365 | * This routine is to get the next eligible FCF record index in a round | 12483 | * This routine is to get the next eligible FCF record index in a round |
12366 | * robin fashion. If the next eligible FCF record index equals to the | 12484 | * robin fashion. If the next eligible FCF record index equals to the |
12367 | * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) | 12485 | * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) |
12368 | * shall be returned, otherwise, the next eligible FCF record's index | 12486 | * shall be returned, otherwise, the next eligible FCF record's index |
12369 | * shall be returned. | 12487 | * shall be returned. |
12370 | **/ | 12488 | **/ |
@@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) | |||
12392 | return LPFC_FCOE_FCF_NEXT_NONE; | 12510 | return LPFC_FCOE_FCF_NEXT_NONE; |
12393 | } | 12511 | } |
12394 | 12512 | ||
12395 | /* Check roundrobin failover index bmask stop condition */ | ||
12396 | if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { | ||
12397 | if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { | ||
12398 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | ||
12399 | "2847 Round robin failover FCF index " | ||
12400 | "search hit stop condition:x%x\n", | ||
12401 | next_fcf_index); | ||
12402 | return LPFC_FCOE_FCF_NEXT_NONE; | ||
12403 | } | ||
12404 | /* The roundrobin failover index bmask updated, start over */ | ||
12405 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
12406 | "2848 Round robin failover FCF index bmask " | ||
12407 | "updated, start over\n"); | ||
12408 | spin_lock_irq(&phba->hbalock); | ||
12409 | phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; | ||
12410 | spin_unlock_irq(&phba->hbalock); | ||
12411 | return phba->fcf.fcf_rr_init_indx; | ||
12412 | } | ||
12413 | |||
12414 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12513 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12415 | "2845 Get next round robin failover " | 12514 | "2845 Get next roundrobin failover FCF (x%x)\n", |
12416 | "FCF index x%x\n", next_fcf_index); | 12515 | next_fcf_index); |
12516 | |||
12417 | return next_fcf_index; | 12517 | return next_fcf_index; |
12418 | } | 12518 | } |
12419 | 12519 | ||
@@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) | |||
12422 | * @phba: pointer to lpfc hba data structure. | 12522 | * @phba: pointer to lpfc hba data structure. |
12423 | * | 12523 | * |
12424 | * This routine sets the FCF record index in to the eligible bmask for | 12524 | * This routine sets the FCF record index in to the eligible bmask for |
12425 | * round robin failover search. It checks to make sure that the index | 12525 | * roundrobin failover search. It checks to make sure that the index |
12426 | * does not go beyond the range of the driver allocated bmask dimension | 12526 | * does not go beyond the range of the driver allocated bmask dimension |
12427 | * before setting the bit. | 12527 | * before setting the bit. |
12428 | * | 12528 | * |
@@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12434 | { | 12534 | { |
12435 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { | 12535 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
12436 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | 12536 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
12437 | "2610 HBA FCF index reached driver's " | 12537 | "2610 FCF (x%x) reached driver's book " |
12438 | "book keeping dimension: fcf_index:%d, " | 12538 | "keeping dimension:x%x\n", |
12439 | "driver_bmask_max:%d\n", | ||
12440 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); | 12539 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
12441 | return -EINVAL; | 12540 | return -EINVAL; |
12442 | } | 12541 | } |
12443 | /* Set the eligible FCF record index bmask */ | 12542 | /* Set the eligible FCF record index bmask */ |
12444 | set_bit(fcf_index, phba->fcf.fcf_rr_bmask); | 12543 | set_bit(fcf_index, phba->fcf.fcf_rr_bmask); |
12445 | 12544 | ||
12446 | /* Set the roundrobin index bmask updated */ | ||
12447 | spin_lock_irq(&phba->hbalock); | ||
12448 | phba->fcf.fcf_flag |= FCF_REDISC_RRU; | ||
12449 | spin_unlock_irq(&phba->hbalock); | ||
12450 | |||
12451 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12545 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12452 | "2790 Set FCF index x%x to round robin failover " | 12546 | "2790 Set FCF (x%x) to roundrobin FCF failover " |
12453 | "bmask\n", fcf_index); | 12547 | "bmask\n", fcf_index); |
12454 | 12548 | ||
12455 | return 0; | 12549 | return 0; |
@@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12460 | * @phba: pointer to lpfc hba data structure. | 12554 | * @phba: pointer to lpfc hba data structure. |
12461 | * | 12555 | * |
12462 | * This routine clears the FCF record index from the eligible bmask for | 12556 | * This routine clears the FCF record index from the eligible bmask for |
12463 | * round robin failover search. It checks to make sure that the index | 12557 | * roundrobin failover search. It checks to make sure that the index |
12464 | * does not go beyond the range of the driver allocated bmask dimension | 12558 | * does not go beyond the range of the driver allocated bmask dimension |
12465 | * before clearing the bit. | 12559 | * before clearing the bit. |
12466 | **/ | 12560 | **/ |
@@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12469 | { | 12563 | { |
12470 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { | 12564 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
12471 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | 12565 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
12472 | "2762 HBA FCF index goes beyond driver's " | 12566 | "2762 FCF (x%x) reached driver's book " |
12473 | "book keeping dimension: fcf_index:%d, " | 12567 | "keeping dimension:x%x\n", |
12474 | "driver_bmask_max:%d\n", | ||
12475 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); | 12568 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
12476 | return; | 12569 | return; |
12477 | } | 12570 | } |
@@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12479 | clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); | 12572 | clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); |
12480 | 12573 | ||
12481 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12574 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12482 | "2791 Clear FCF index x%x from round robin failover " | 12575 | "2791 Clear FCF (x%x) from roundrobin failover " |
12483 | "bmask\n", fcf_index); | 12576 | "bmask\n", fcf_index); |
12484 | } | 12577 | } |
12485 | 12578 | ||
@@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) | |||
12530 | } | 12623 | } |
12531 | } else { | 12624 | } else { |
12532 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12625 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12533 | "2775 Start FCF rediscovery quiescent period " | 12626 | "2775 Start FCF rediscover quiescent timer\n"); |
12534 | "wait timer before scaning FCF table\n"); | ||
12535 | /* | 12627 | /* |
12536 | * Start FCF rediscovery wait timer for pending FCF | 12628 | * Start FCF rediscovery wait timer for pending FCF |
12537 | * before rescan FCF record table. | 12629 | * before rescan FCF record table. |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index a0ca572ec28b..c4483feb8b71 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -19,10 +19,16 @@ | |||
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 | 21 | #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 |
22 | #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 | ||
23 | #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 | ||
24 | #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 | ||
22 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 | 25 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 |
23 | #define LPFC_GET_QE_REL_INT 32 | 26 | #define LPFC_GET_QE_REL_INT 32 |
24 | #define LPFC_RPI_LOW_WATER_MARK 10 | 27 | #define LPFC_RPI_LOW_WATER_MARK 10 |
25 | 28 | ||
29 | #define LPFC_UNREG_FCF 1 | ||
30 | #define LPFC_SKIP_UNREG_FCF 0 | ||
31 | |||
26 | /* Amount of time in seconds for waiting FCF rediscovery to complete */ | 32 | /* Amount of time in seconds for waiting FCF rediscovery to complete */ |
27 | #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ | 33 | #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ |
28 | 34 | ||
@@ -163,9 +169,8 @@ struct lpfc_fcf { | |||
163 | #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ | 169 | #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ |
164 | #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ | 170 | #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ |
165 | #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ | 171 | #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ |
166 | #define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ | 172 | #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) |
167 | uint32_t addr_mode; | 173 | uint32_t addr_mode; |
168 | uint16_t fcf_rr_init_indx; | ||
169 | uint32_t eligible_fcf_cnt; | 174 | uint32_t eligible_fcf_cnt; |
170 | struct lpfc_fcf_rec current_rec; | 175 | struct lpfc_fcf_rec current_rec; |
171 | struct lpfc_fcf_rec failover_rec; | 176 | struct lpfc_fcf_rec failover_rec; |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f93120e4c796..7a1b5b112a0b 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.17" | 21 | #define LPFC_DRIVER_VERSION "8.3.18" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |