path: root/drivers/scsi/lpfc
author     James Smart <James.Smart@Emulex.Com>  2008-04-07 10:15:56 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-04-10 08:52:11 -0400
commit     58da1ffb2b1234e9c6c75013a649c659cc38ebd4 (patch)
tree       f159b38ff5c830e10eb90918ef5b42ae71645daa /drivers/scsi/lpfc
parent     b35c07d00751c3d554dd6e582b661ac2e8ffc162 (diff)
[SCSI] lpfc 8.2.6 : Multiple discovery fixes
Multiple Discovery Fixes:

- Fix a race on discovery due to link events coinciding with vport_delete.
- Use the NLP_FABRIC state to filter out switch-based pseudo initiators that reuse the same WWNs.
- Correct erroneous setting of DID=0 in lpfc_matchdid().
- Correct an extra reference count taken in the lookup path for the remote ID from an unsolicited ELS.
- Correct a double-free bug in the ELS abort path.
- Correct FDMI server discovery logic for switches that return a WWN of 0.
- Fix bugs in ndlp management when a node changes address.
- Correct a bug that did not delete RSCNs for vports upon link transitions.
- Fix the "0216 Link event during NS query" error, which pops up when vports are swapped to different switch ports.
- Add sanity checks on ndlp structures.
- Fix the devloss log message to dump the WWN correctly.
- Hold off management commands that were interfering with discovery mailbox commands.
- Remove the unnecessary FC_ESTABLISH_LINK logic.
- Correct some race conditions in the worker thread, resulting in devloss:
  - Clear the work_port_events field before handling the work port events.
  - Clear the deferred ring event before handling a deferred ring event.
  - Hold the hba lock when waking up the work thread.
- Send an ACC for the RSCN even when we aren't going to handle it.
- Fix locking behavior that was not properly protecting the ACTIVE flag, thus allowing mailbox command order to shift.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
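The worker-thread items above all follow one pattern: snapshot the pending event mask and clear it while the lock is held, then run the handlers against the snapshot, so events posted during handling stay pending for the next pass instead of being cleared retroactively (see the lpfc_work_done() and lpfc_hb_timeout() hunks below). The following is a minimal userspace C sketch of that pattern, not lpfc code; the event bits, the mutex, and the function names are illustrative stand-ins for work_port_events and work_port_lock.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define EVT_DISC_TMO 0x1    /* stand-in for WORKER_DISC_TMO */
#define EVT_ELS_TMO  0x2    /* stand-in for WORKER_ELS_TMO */

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t work_events;    /* analogue of vport->work_port_events */

/* Producer side: post an event; the driver would also wake the worker here. */
static void post_event(uint32_t evt)
{
    pthread_mutex_lock(&work_lock);
    work_events |= evt;
    pthread_mutex_unlock(&work_lock);
}

/* Worker side: snapshot and clear under the lock *before* handling.
 * Events posted while the handlers run remain set for the next pass,
 * which is the race this patch closes in lpfc_work_done().
 */
static void work_done(void)
{
    uint32_t pending;

    pthread_mutex_lock(&work_lock);
    pending = work_events;
    work_events &= ~pending;
    pthread_mutex_unlock(&work_lock);

    if (pending & EVT_DISC_TMO)
        printf("handle discovery timeout\n");
    if (pending & EVT_ELS_TMO)
        printf("handle ELS timeout\n");
}

int main(void)
{
    post_event(EVT_DISC_TMO);
    work_done();
    return 0;
}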
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         |  46
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c    |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        |  85
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |  41
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       |  64
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  |  40
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       |  26
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        |  93
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c      |   3
11 files changed, 208 insertions, 201 deletions
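Most hunks below add an NLP_CHK_NODE_ACT(ndlp) guard after node lookups: a node-list entry can be logically freed or recycled while a pointer to it is still reachable, so a non-NULL pointer alone no longer proves the node is usable. Here is a minimal sketch of that kind of guard; the struct, the flag, and the helper names are simplified stand-ins, not the driver's actual definitions.

#include <stdbool.h>
#include <stddef.h>

#define NODE_USG_ACTIVE 0x1    /* stand-in for the "node is active" usage bit */

struct disc_node {
    unsigned int usg_map;    /* usage/state bits, cf. ndlp->nlp_usg_map */
    unsigned int did;        /* discovered N_Port ID */
};

/* A non-NULL pointer is not enough; the node must also still be marked
 * active.  This mirrors the "if (ndlp && NLP_CHK_NODE_ACT(ndlp))" checks
 * added throughout this patch.
 */
static bool node_is_usable(const struct disc_node *ndlp)
{
    return ndlp != NULL && (ndlp->usg_map & NODE_USG_ACTIVE);
}

static unsigned int remote_id_of(const struct disc_node *ndlp)
{
    if (!node_is_usable(ndlp))
        return 0;    /* skip freed or recycled nodes */
    return ndlp->did;
}

int main(void)
{
    struct disc_node n = { NODE_USG_ACTIVE, 0x10200 };
    return remote_id_of(&n) ? 0 : 1;
}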
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2ab2d24dcc15..c66d8d19c577 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -268,7 +268,6 @@ struct lpfc_vport {
 #define FC_NLP_MORE         0x40    /* More node to process in node tbl */
 #define FC_OFFLINE_MODE     0x80    /* Interface is offline for diag */
 #define FC_FABRIC           0x100   /* We are fabric attached */
-#define FC_ESTABLISH_LINK   0x200   /* Reestablish Link */
 #define FC_RSCN_DISCOVERY   0x400   /* Auth all devices after RSCN */
 #define FC_SCSI_SCAN_TMO    0x4000  /* scsi scan timer running */
 #define FC_ABORT_DISCOVERY  0x8000  /* we want to abort discovery */
@@ -433,8 +432,6 @@ struct lpfc_hba {
 
     uint32_t fc_eventTag;   /* event tag for link attention */
 
-
-    struct timer_list fc_estabtmo;  /* link establishment timer */
     /* These fields used to be binfo */
     uint32_t fc_pref_DID;   /* preferred D_ID */
     uint8_t fc_pref_ALPA;   /* preferred AL_PA */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b12a841703ca..6917800fc1a7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1962,7 +1962,11 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 
     phba->sysfs_mbox.mbox->vport = vport;
 
-    if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+    /* Don't allow mailbox commands to be sent when blocked
+     * or when in the middle of discovery
+     */
+    if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
+        vport->fc_flag & FC_NDISC_ACTIVE) {
         sysfs_mbox_idle(phba);
         spin_unlock_irq(&phba->hbalock);
         return -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3d0ccd9b341d..b64dc711cd8d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -438,7 +438,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
                 (!(vport->ct_flags & FC_CT_RFF_ID)) ||
                 (!vport->cfg_restrict_login)) {
                 ndlp = lpfc_setup_disc_node(vport, Did);
-                if (ndlp) {
+                if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
                     lpfc_debugfs_disc_trc(vport,
                         LPFC_DISC_TRC_CT,
                         "Parse GID_FTrsp: "
@@ -543,7 +543,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
     struct lpfc_dmabuf *outp;
     struct lpfc_sli_ct_request *CTrsp;
     struct lpfc_nodelist *ndlp;
-    int rc, retry;
+    int rc;
 
     /* First save ndlp, before we overwrite it */
     ndlp = cmdiocb->context_un.ndlp;
@@ -563,45 +563,29 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
     if (vport->load_flag & FC_UNLOADING)
         goto out;
 
-    if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
+    if (lpfc_els_chk_latt(vport)) {
         lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                  "0216 Link event during NS query\n");
         lpfc_vport_set_state(vport, FC_VPORT_FAILED);
         goto out;
     }
-
+    if (lpfc_error_lost_link(irsp)) {
+        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                 "0226 NS query failed due to link event\n");
+        goto out;
+    }
     if (irsp->ulpStatus) {
         /* Check for retry */
         if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
-            retry = 1;
-            if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                switch (irsp->un.ulpWord[4]) {
-                case IOERR_NO_RESOURCES:
-                    /* We don't increment the retry
-                     * count for this case.
-                     */
-                    break;
-                case IOERR_LINK_DOWN:
-                case IOERR_SLI_ABORTED:
-                case IOERR_SLI_DOWN:
-                    retry = 0;
-                    break;
-                default:
-                    vport->fc_ns_retry++;
-                }
-            }
-            else
-                vport->fc_ns_retry++;
+            if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+                irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
+                vport->fc_ns_retry++;
 
-            if (retry) {
-                /* CT command is being retried */
-                rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
-                     vport->fc_ns_retry, 0);
-                if (rc == 0) {
-                    /* success */
-                    goto out;
-                }
-            }
+            /* CT command is being retried */
+            rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+                     vport->fc_ns_retry, 0);
+            if (rc == 0)
+                goto out;
         }
         lpfc_vport_set_state(vport, FC_VPORT_FAILED);
         lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -780,7 +764,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
     /* This is a target port, unregistered port, or the GFF_ID failed */
     ndlp = lpfc_setup_disc_node(vport, did);
-    if (ndlp) {
+    if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
         lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                  "0242 Process x%x GFF "
                  "NameServer Rsp Data: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 783d1eea13ef..90272e65957a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -503,6 +503,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                 ndlp->nlp_sid);
         if (ndlp->nlp_type & NLP_FCP_INITIATOR)
             len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+        len += snprintf(buf+len, size-len, "usgmap:%x ",
+            ndlp->nlp_usg_map);
         len += snprintf(buf+len, size-len, "refcnt:%x",
             atomic_read(&ndlp->kref.refcount));
         len += snprintf(buf+len, size-len, "\n");
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cbb68a942255..6e0e991c6445 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -719,9 +719,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
         if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
             icmd->un.elsreq64.bdl.ulpIoTag32) {
             ndlp = (struct lpfc_nodelist *)(iocb->context1);
-            if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+            if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+                (ndlp->nlp_DID == Fabric_DID))
                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
-            }
         }
     }
     spin_unlock_irq(&phba->hbalock);
@@ -829,7 +829,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
     struct fc_rport *rport;
     struct serv_parm *sp;
     uint8_t name[sizeof(struct lpfc_name)];
-    uint32_t rc;
+    uint32_t rc, keepDID = 0;
 
     /* Fabric nodes can have the same WWPN so we don't bother searching
      * by WWPN.  Just return the ndlp that was given to us.
@@ -858,11 +858,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
             return ndlp;
         lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
     } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+        rc = memcmp(&ndlp->nlp_portname, name,
+                sizeof(struct lpfc_name));
+        if (!rc)
+            return ndlp;
         new_ndlp = lpfc_enable_node(vport, new_ndlp,
                         NLP_STE_UNUSED_NODE);
         if (!new_ndlp)
             return ndlp;
-    }
+        keepDID = new_ndlp->nlp_DID;
+    } else
+        keepDID = new_ndlp->nlp_DID;
 
     lpfc_unreg_rpi(vport, new_ndlp);
     new_ndlp->nlp_DID = ndlp->nlp_DID;
@@ -893,12 +899,24 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
         }
         new_ndlp->nlp_type = ndlp->nlp_type;
     }
+        /* We shall actually free the ndlp with both nlp_DID and
+         * nlp_portname fields equals 0 to avoid any ndlp on the
+         * nodelist never to be used.
+         */
+        if (ndlp->nlp_DID == 0) {
+            spin_lock_irq(&phba->ndlp_lock);
+            NLP_SET_FREE_REQ(ndlp);
+            spin_unlock_irq(&phba->ndlp_lock);
+        }
 
+        /* Two ndlps cannot have the same did on the nodelist */
+        ndlp->nlp_DID = keepDID;
         lpfc_drop_node(vport, ndlp);
     }
     else {
         lpfc_unreg_rpi(vport, ndlp);
-        ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
+        /* Two ndlps cannot have the same did */
+        ndlp->nlp_DID = keepDID;
         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
     }
     return new_ndlp;
@@ -2091,7 +2109,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
     }
 
     phba->fc_stat.elsXmitRetry++;
-    if (ndlp && delay) {
+    if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
         phba->fc_stat.elsDelayRetry++;
         ndlp->nlp_retry = cmdiocb->retry;
 
@@ -2121,7 +2139,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
             lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
             return 1;
         case ELS_CMD_PLOGI:
-            if (ndlp) {
+            if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
                 ndlp->nlp_prev_state = ndlp->nlp_state;
                 lpfc_nlp_set_state(vport, ndlp,
                            NLP_STE_PLOGI_ISSUE);
@@ -2302,7 +2320,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
     lpfc_mbuf_free(phba, mp->virt, mp->phys);
     kfree(mp);
     mempool_free(pmb, phba->mbox_mem_pool);
-    if (ndlp) {
+    if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
         lpfc_nlp_put(ndlp);
         /* This is the end of the default RPI cleanup logic for this
          * ndlp. If no other discovery threads are using this ndlp.
@@ -2335,7 +2353,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
      * function can have cmdiocb->contest1 (ndlp) field set to NULL.
      */
     pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
-    if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+    if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+        (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
         /* A LS_RJT associated with Default RPI cleanup has its own
          * seperate code path.
          */
@@ -2344,7 +2363,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
     }
 
     /* Check to see if link went down during discovery */
-    if (!ndlp || lpfc_els_chk_latt(vport)) {
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
         if (mbox) {
             mp = (struct lpfc_dmabuf *) mbox->context1;
             if (mp) {
@@ -2353,7 +2372,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
             }
             mempool_free(mbox, phba->mbox_mem_pool);
         }
-        if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+            (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
             if (lpfc_nlp_not_used(ndlp)) {
                 ndlp = NULL;
                 /* Indicate the node has already released,
@@ -2443,7 +2463,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         mempool_free(mbox, phba->mbox_mem_pool);
     }
 out:
-    if (ndlp) {
+    if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
         spin_lock_irq(shost->host_lock);
         ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
         spin_unlock_irq(shost->host_lock);
@@ -3139,6 +3159,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
         /* Another thread is walking fc_rscn_id_list on this vport */
         spin_unlock_irq(shost->host_lock);
         vport->fc_flag |= FC_RSCN_DISCOVERY;
+        /* Send back ACC */
+        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
         return 0;
     }
     /* Indicate we are walking fc_rscn_id_list on this vport */
@@ -3928,7 +3950,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
     else {
         struct lpfc_nodelist *ndlp;
         ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
-        if (ndlp)
+        if (ndlp && NLP_CHK_NODE_ACT(ndlp))
             remote_ID = ndlp->nlp_DID;
     }
     lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -4097,21 +4119,22 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         newnode = 1;
         if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
             ndlp->nlp_type |= NLP_FABRIC;
-    } else {
-        if (!NLP_CHK_NODE_ACT(ndlp)) {
-            ndlp = lpfc_enable_node(vport, ndlp,
-                    NLP_STE_UNUSED_NODE);
-            if (!ndlp)
-                goto dropit;
-        }
-        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
-            /* This is simular to the new node path */
-            ndlp = lpfc_nlp_get(ndlp);
-            if (!ndlp)
-                goto dropit;
-            lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
-            newnode = 1;
-        }
+    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+        ndlp = lpfc_enable_node(vport, ndlp,
+                    NLP_STE_UNUSED_NODE);
+        if (!ndlp)
+            goto dropit;
+        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+        newnode = 1;
+        if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+            ndlp->nlp_type |= NLP_FABRIC;
+    } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+        /* This is similar to the new node path */
+        ndlp = lpfc_nlp_get(ndlp);
+        if (!ndlp)
+            goto dropit;
+        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+        newnode = 1;
     }
 
     phba->fc_stat.elsRcvFrame++;
@@ -4451,7 +4474,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
             return;
         }
         lpfc_nlp_init(vport, ndlp, NameServer_DID);
-        ndlp->nlp_type |= NLP_FABRIC;
     } else if (!NLP_CHK_NODE_ACT(ndlp)) {
         ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
         if (!ndlp) {
@@ -4465,6 +4487,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
             return;
         }
     }
+    ndlp->nlp_type |= NLP_FABRIC;
 
     lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 
@@ -4481,8 +4504,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
         if (ndlp_fdmi) {
             lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
             ndlp_fdmi->nlp_type |= NLP_FABRIC;
-            ndlp_fdmi->nlp_state =
-                NLP_STE_PLOGI_ISSUE;
+            lpfc_nlp_set_state(vport, ndlp_fdmi,
+                NLP_STE_PLOGI_ISSUE);
             lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
                 0);
         }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 976653440fba..7c8c3e6f399d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -69,7 +69,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
     rdata = rport->dd_data;
     ndlp = rdata->pnode;
 
-    if (!ndlp) {
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
         if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
             printk(KERN_ERR "Cannot find remote node"
                " to terminate I/O Data x%x\n",
@@ -114,7 +114,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
     rdata = rport->dd_data;
     ndlp = rdata->pnode;
-    if (!ndlp)
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
         return;
 
     vport = ndlp->vport;
@@ -243,8 +243,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
     if (warn_on) {
         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                  "0203 Devloss timeout on "
-                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                 "NPort x%x Data: x%x x%x x%x\n",
+                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                 "NPort x%06x Data: x%x x%x x%x\n",
                  *name, *(name+1), *(name+2), *(name+3),
                  *(name+4), *(name+5), *(name+6), *(name+7),
                  ndlp->nlp_DID, ndlp->nlp_flag,
@@ -252,8 +252,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
     } else {
         lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                  "0204 Devloss timeout on "
-                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                 "NPort x%x Data: x%x x%x x%x\n",
+                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                 "NPort x%06x Data: x%x x%x x%x\n",
                  *name, *(name+1), *(name+2), *(name+3),
                  *(name+4), *(name+5), *(name+6), *(name+7),
                  ndlp->nlp_DID, ndlp->nlp_flag,
@@ -399,7 +399,10 @@ lpfc_work_done(struct lpfc_hba *phba)
             vport = vports[i];
             if (vport == NULL)
                 break;
+            spin_lock_irq(&vport->work_port_lock);
             work_port_events = vport->work_port_events;
+            vport->work_port_events &= ~work_port_events;
+            spin_unlock_irq(&vport->work_port_lock);
             if (work_port_events & WORKER_DISC_TMO)
                 lpfc_disc_timeout_handler(vport);
             if (work_port_events & WORKER_ELS_TMO)
@@ -416,9 +419,6 @@ lpfc_work_done(struct lpfc_hba *phba)
                 lpfc_ramp_down_queue_handler(phba);
             if (work_port_events & WORKER_RAMP_UP_QUEUE)
                 lpfc_ramp_up_queue_handler(phba);
-            spin_lock_irq(&vport->work_port_lock);
-            vport->work_port_events &= ~work_port_events;
-            spin_unlock_irq(&vport->work_port_lock);
         }
     lpfc_destroy_vport_work_array(phba, vports);
 
@@ -430,10 +430,10 @@ lpfc_work_done(struct lpfc_hba *phba)
         if (pring->flag & LPFC_STOP_IOCB_EVENT) {
             pring->flag |= LPFC_DEFERRED_RING_EVENT;
         } else {
+            pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
             lpfc_sli_handle_slow_ring_event(phba, pring,
                             (status &
                              HA_RXMASK));
-            pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
         }
         /*
          * Turn on Ring interrupts
@@ -519,7 +519,9 @@ lpfc_do_work(void *p)
             schedule();
         }
     }
+    spin_lock_irq(&phba->hbalock);
     phba->work_wait = NULL;
+    spin_unlock_irq(&phba->hbalock);
     return 0;
 }
 
@@ -809,11 +811,9 @@ out:
     mempool_free(pmb, phba->mbox_mem_pool);
 
     spin_lock_irq(shost->host_lock);
-    vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
+    vport->fc_flag &= ~FC_ABORT_DISCOVERY;
     spin_unlock_irq(shost->host_lock);
 
-    del_timer_sync(&phba->fc_estabtmo);
-
     lpfc_can_disctmo(vport);
 
     /* turn on Link Attention interrupts */
@@ -1340,10 +1340,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                  i++) {
                 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                     continue;
+                if (phba->fc_topology == TOPOLOGY_LOOP) {
+                    lpfc_vport_set_state(vports[i],
+                            FC_VPORT_LINKDOWN);
+                    continue;
+                }
                 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
                     lpfc_initial_fdisc(vports[i]);
-                else if (phba->sli3_options &
-                        LPFC_SLI3_NPIV_ENABLED) {
+                else {
                     lpfc_vport_set_state(vports[i],
                             FC_VPORT_NO_FABRIC_SUPP);
                     lpfc_printf_vlog(vport, KERN_ERR,
@@ -2190,10 +2194,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     if (did == Bcast_DID)
         return 0;
 
-    if (ndlp->nlp_DID == 0) {
-        return 0;
-    }
-
     /* First check for Direct match */
     if (ndlp->nlp_DID == did)
         return 1;
@@ -2301,7 +2301,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
         return ndlp;
     }
 
-    if (vport->fc_flag & FC_RSCN_MODE) {
+    if ((vport->fc_flag & FC_RSCN_MODE) &&
+        !(vport->fc_flag & FC_NDISC_ACTIVE)) {
         if (lpfc_rscn_payload_check(vport, did)) {
             /* If we've already recieved a PLOGI from this NPort
              * we don't need to try to discover it again.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 22843751c2ca..26c67c866d1f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -559,8 +559,10 @@ lpfc_hb_timeout(unsigned long ptr)
     phba->pport->work_port_events |= WORKER_HB_TMO;
     spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
+    spin_lock_irqsave(&phba->hbalock, iflag);
     if (phba->work_wait)
         wake_up(phba->work_wait);
+    spin_unlock_irqrestore(&phba->hbalock, iflag);
     return;
 }
 
@@ -714,12 +716,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
     struct lpfc_vport *vport = phba->pport;
     struct lpfc_sli *psli = &phba->sli;
     struct lpfc_sli_ring *pring;
-    struct lpfc_vport **vports;
     uint32_t event_data;
     unsigned long temperature;
     struct temp_event temp_event_data;
     struct Scsi_Host *shost;
-    int i;
 
     /* If the pci channel is offline, ignore possible errors,
      * since we cannot communicate with the pci card anyway. */
@@ -737,17 +737,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
737 "Data: x%x x%x x%x\n", 737 "Data: x%x x%x x%x\n",
738 phba->work_hs, 738 phba->work_hs,
739 phba->work_status[0], phba->work_status[1]); 739 phba->work_status[0], phba->work_status[1]);
740 vports = lpfc_create_vport_work_array(phba); 740
741 if (vports != NULL)
742 for(i = 0;
743 i <= phba->max_vpi && vports[i] != NULL;
744 i++){
745 shost = lpfc_shost_from_vport(vports[i]);
746 spin_lock_irq(shost->host_lock);
747 vports[i]->fc_flag |= FC_ESTABLISH_LINK;
748 spin_unlock_irq(shost->host_lock);
749 }
750 lpfc_destroy_vport_work_array(phba, vports);
751 spin_lock_irq(&phba->hbalock); 741 spin_lock_irq(&phba->hbalock);
752 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 742 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
753 spin_unlock_irq(&phba->hbalock); 743 spin_unlock_irq(&phba->hbalock);
@@ -761,7 +751,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
     pring = &psli->ring[psli->fcp_ring];
     lpfc_sli_abort_iocb_ring(phba, pring);
 
-
     /*
      * There was a firmware error. Take the hba offline and then
      * attempt to restart it.
@@ -770,7 +759,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
     lpfc_offline(phba);
     lpfc_sli_brdrestart(phba);
     if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
-        mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
         lpfc_unblock_mgmt_io(phba);
         return;
     }
@@ -1454,6 +1442,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
         NLP_SET_FREE_REQ(ndlp);
         spin_unlock_irq(&phba->ndlp_lock);
 
+        if (vport->port_type != LPFC_PHYSICAL_PORT &&
+            ndlp->nlp_DID == Fabric_DID) {
+            /* Just free up ndlp with Fabric_DID for vports */
+            lpfc_nlp_put(ndlp);
+            continue;
+        }
+
         if (ndlp->nlp_type & NLP_FABRIC)
             lpfc_disc_state_machine(vport, ndlp, NULL,
                     NLP_EVT_DEVICE_RECOVERY);
@@ -1491,31 +1486,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
     return;
 }
 
-static void
-lpfc_establish_link_tmo(unsigned long ptr)
-{
-    struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
-    struct lpfc_vport **vports;
-    unsigned long iflag;
-    int i;
-
-    /* Re-establishing Link, timer expired */
-    lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-            "1300 Re-establishing Link, timer expired "
-            "Data: x%x x%x\n",
-            phba->pport->fc_flag, phba->pport->port_state);
-    vports = lpfc_create_vport_work_array(phba);
-    if (vports != NULL)
-        for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-            struct Scsi_Host *shost;
-            shost = lpfc_shost_from_vport(vports[i]);
-            spin_lock_irqsave(shost->host_lock, iflag);
-            vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
-            spin_unlock_irqrestore(shost->host_lock, iflag);
-        }
-    lpfc_destroy_vport_work_array(phba, vports);
-}
-
 void
 lpfc_stop_vport_timers(struct lpfc_vport *vport)
 {
@@ -1529,7 +1499,6 @@ static void
 lpfc_stop_phba_timers(struct lpfc_hba *phba)
 {
     del_timer_sync(&phba->fcp_poll_timer);
-    del_timer_sync(&phba->fc_estabtmo);
     lpfc_stop_vport_timers(phba->pport);
     del_timer_sync(&phba->sli.mbox_tmo);
     del_timer_sync(&phba->fabric_block_timer);
@@ -2005,10 +1974,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
     phba->max_vpi = LPFC_MAX_VPI;
 
     /* Initialize timers used by driver */
-    init_timer(&phba->fc_estabtmo);
-    phba->fc_estabtmo.function = lpfc_establish_link_tmo;
-    phba->fc_estabtmo.data = (unsigned long)phba;
-
     init_timer(&phba->hb_tmofunc);
     phba->hb_tmofunc.function = lpfc_hb_timeout;
     phba->hb_tmofunc.data = (unsigned long)phba;
@@ -2416,11 +2381,6 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 
     pci_set_master(pdev);
 
-    /* Re-establishing Link */
-    spin_lock_irq(shost->host_lock);
-    phba->pport->fc_flag |= FC_ESTABLISH_LINK;
-    spin_unlock_irq(shost->host_lock);
-
     spin_lock_irq(&phba->hbalock);
     psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
     spin_unlock_irq(&phba->hbalock);
@@ -2445,9 +2405,7 @@ static void lpfc_io_resume(struct pci_dev *pdev)
     struct Scsi_Host *shost = pci_get_drvdata(pdev);
     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-    if (lpfc_online(phba) == 0) {
-        mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
-    }
+    lpfc_online(phba);
 }
 
 static struct pci_device_id lpfc_id_table[] = {
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d513813f6697..d08c4c890744 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -451,7 +451,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     spin_unlock_irq(shost->host_lock);
 
     if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-            (vport->num_disc_nodes)) {
+        (vport->num_disc_nodes)) {
         /* Check to see if there are more
          * ADISCs to be sent
          */
@@ -469,20 +469,23 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 lpfc_end_rscn(vport);
             }
         }
-        else if (vport->num_disc_nodes) {
-            /* Check to see if there are more
-             * PLOGIs to be sent
-             */
-            lpfc_more_plogi(vport);
-
-            if (vport->num_disc_nodes == 0) {
-                spin_lock_irq(shost->host_lock);
-                vport->fc_flag &= ~FC_NDISC_ACTIVE;
-                spin_unlock_irq(shost->host_lock);
-                lpfc_can_disctmo(vport);
-                lpfc_end_rscn(vport);
-            }
-        }
+        }
+    } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+           (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+           (vport->num_disc_nodes)) {
+        spin_lock_irq(shost->host_lock);
+        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+        spin_unlock_irq(shost->host_lock);
+        /* Check to see if there are more
+         * PLOGIs to be sent
+         */
+        lpfc_more_plogi(vport);
+        if (vport->num_disc_nodes == 0) {
+            spin_lock_irq(shost->host_lock);
+            vport->fc_flag &= ~FC_NDISC_ACTIVE;
+            spin_unlock_irq(shost->host_lock);
+            lpfc_can_disctmo(vport);
+            lpfc_end_rscn(vport);
         }
     }
 
@@ -869,8 +872,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 
     lp = (uint32_t *) prsp->virt;
     sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-    if (wwn_to_u64(sp->portName.u.wwn) == 0 ||
-        wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+
+    /* Some switches have FDMI servers returning 0 for WWN */
+    if ((ndlp->nlp_DID != FDMI_DID) &&
+        (wwn_to_u64(sp->portName.u.wwn) == 0 ||
+         wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
         lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                  "0142 PLOGI RSP: Invalid WWN.\n");
         goto out;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 70255c11d3ad..6df8bc003a88 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -578,14 +578,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
           lpfc_cmd->result == IOERR_NO_RESOURCES ||
           lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
                 cmd->result = ScsiResult(DID_REQUEUE, 0);
                 break;
             } /* else: fall through */
         default:
             cmd->result = ScsiResult(DID_ERROR, 0);
             break;
         }
 
-        if ((pnode == NULL )
+        if (!pnode || !NLP_CHK_NODE_ACT(pnode)
             || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
             cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
     } else {
@@ -626,7 +626,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
     if (!result)
         lpfc_rampup_queue_depth(vport, sdev);
 
-    if (!result && pnode != NULL &&
+    if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
        ((jiffies - pnode->last_ramp_up_time) >
         LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
        ((jiffies - pnode->last_q_full_time) >
@@ -654,7 +654,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
      * Check for queue full. If the lun is reporting queue full, then
      * back off the lun queue depth to prevent target overloads.
      */
-    if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+    if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+        NLP_CHK_NODE_ACT(pnode)) {
         pnode->last_q_full_time = jiffies;
 
         shost_for_each_device(tmp_sdev, sdev->host) {
@@ -704,6 +705,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
     int datadir = scsi_cmnd->sc_data_direction;
     char tag[2];
 
+    if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+        return;
+
     lpfc_cmd->fcp_rsp->rspSnsLen = 0;
     /* clear task management bits */
     lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
@@ -785,9 +789,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
     struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
     struct lpfc_nodelist *ndlp = rdata->pnode;
 
-    if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+        ndlp->nlp_state != NLP_STE_MAPPED_NODE)
         return 0;
-    }
 
     piocbq = &(lpfc_cmd->cur_iocbq);
     piocbq->vport = vport;
@@ -842,7 +846,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
     struct lpfc_iocbq *iocbqrsp;
     int ret;
 
-    if (!rdata->pnode)
+    if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
         return FAILED;
 
     lpfc_cmd->rdata = rdata;
@@ -959,7 +963,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
      * Catch race where our node has transitioned, but the
      * transport is still transitioning.
      */
-    if (!ndlp) {
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
         cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
         goto out_fail_command;
     }
@@ -1146,7 +1150,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
      * target is rediscovered or devloss timeout expires.
      */
     while (1) {
-        if (!pnode)
+        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
             goto out;
 
         if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
@@ -1162,7 +1166,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
             goto out;
         }
         pnode = rdata->pnode;
-        if (!pnode)
+        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
             goto out;
     }
     if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fc0d9501aba6..c71b9a577770 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2648,7 +2648,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
     spin_unlock_irq(&phba->pport->work_port_lock);
     spin_lock_irq(&phba->hbalock);
     phba->link_state = LPFC_LINK_UNKNOWN;
-    phba->pport->fc_flag |= FC_ESTABLISH_LINK;
     psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
     spin_unlock_irq(&phba->hbalock);
 
@@ -2669,8 +2668,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
     lpfc_offline_prep(phba);
     lpfc_offline(phba);
     lpfc_sli_brdrestart(phba);
-    if (lpfc_online(phba) == 0)     /* Initialize the HBA */
-        mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
+    lpfc_online(phba);
     lpfc_unblock_mgmt_io(phba);
     return;
 }
@@ -2687,28 +2685,41 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
     unsigned long drvr_flag = 0;
     volatile uint32_t word0, ldata;
     void __iomem *to_slim;
+    int processing_queue = 0;
+
+    spin_lock_irqsave(&phba->hbalock, drvr_flag);
+    if (!pmbox) {
+        /* processing mbox queue from intr_handler */
+        processing_queue = 1;
+        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+        pmbox = lpfc_mbox_get(phba);
+        if (!pmbox) {
+            spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+            return MBX_SUCCESS;
+        }
+    }
 
     if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
         pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
         if(!pmbox->vport) {
+            spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
             lpfc_printf_log(phba, KERN_ERR,
                     LOG_MBOX | LOG_VPORT,
                     "1806 Mbox x%x failed. No vport\n",
                     pmbox->mb.mbxCommand);
             dump_stack();
-            return MBX_NOT_FINISHED;
+            goto out_not_finished;
         }
     }
 
-
     /* If the PCI channel is in offline state, do not post mbox. */
-    if (unlikely(pci_channel_offline(phba->pcidev)))
-        return MBX_NOT_FINISHED;
+    if (unlikely(pci_channel_offline(phba->pcidev))) {
+        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+        goto out_not_finished;
+    }
 
-    spin_lock_irqsave(&phba->hbalock, drvr_flag);
     psli = &phba->sli;
 
-
     mb = &pmbox->mb;
     status = MBX_SUCCESS;
 
@@ -2717,14 +2728,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
         /* Mbox command <mbxCommand> cannot issue */
         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-        return MBX_NOT_FINISHED;
+        goto out_not_finished;
     }
 
     if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
         !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-        return MBX_NOT_FINISHED;
+        goto out_not_finished;
     }
 
     if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2738,14 +2749,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
         /* Mbox command <mbxCommand> cannot issue */
         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-        return MBX_NOT_FINISHED;
+        goto out_not_finished;
     }
 
     if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
         /* Mbox command <mbxCommand> cannot issue */
         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-        return MBX_NOT_FINISHED;
+        goto out_not_finished;
     }
 
     /* Another mailbox command is still being processed, queue this
@@ -2792,7 +2803,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
             spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
             /* Mbox command <mbxCommand> cannot issue */
             LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
-            return MBX_NOT_FINISHED;
+            goto out_not_finished;
         }
         /* timeout active mbox command */
         mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2900,7 +2911,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                 spin_unlock_irqrestore(&phba->hbalock,
                                drvr_flag);
-                return MBX_NOT_FINISHED;
+                goto out_not_finished;
             }
 
             /* Check if we took a mbox interrupt while we were
@@ -2967,6 +2978,13 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
     spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
     return status;
+
+out_not_finished:
+    if (processing_queue) {
+        pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
+        lpfc_mbox_cmpl_put(phba, pmbox);
+    }
+    return MBX_NOT_FINISHED;
 }
 
 /*
@@ -3613,6 +3631,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
             irsp->ulpStatus, irsp->un.ulpWord[4]);
 
     /*
+     * If the iocb is not found in Firmware queue the iocb
+     * might have completed already. Do not free it again.
+     */
+    if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+        (irsp->un.ulpWord[4] == IOERR_NO_XRI)) {
+        spin_unlock_irq(&phba->hbalock);
+        lpfc_sli_release_iocbq(phba, cmdiocb);
+        return;
+    }
+    /*
      * make sure we have the right iocbq before taking it
      * off the txcmplq and try to call completion routine.
      */
@@ -4237,10 +4265,15 @@ lpfc_intr_handler(int irq, void *dev_id)
                     pmb->context1 = mp;
                     pmb->context2 = ndlp;
                     pmb->vport = vport;
-                    spin_lock(&phba->hbalock);
-                    phba->sli.sli_flag &=
-                        ~LPFC_SLI_MBOX_ACTIVE;
-                    spin_unlock(&phba->hbalock);
+                    rc = lpfc_sli_issue_mbox(phba,
+                            pmb,
+                            MBX_NOWAIT);
+                    if (rc != MBX_BUSY)
+                        lpfc_printf_log(phba,
+                        KERN_ERR,
+                        LOG_MBOX | LOG_SLI,
+                        "0306 rc should have"
+                        "been MBX_BUSY");
                     goto send_current_mbox;
                 }
             }
@@ -4253,22 +4286,16 @@ lpfc_intr_handler(int irq, void *dev_id)
         }
         if ((work_ha_copy & HA_MBATT) &&
             (phba->sli.mbox_active == NULL)) {
-send_next_mbox:
-            spin_lock(&phba->hbalock);
-            phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-            pmb = lpfc_mbox_get(phba);
-            spin_unlock(&phba->hbalock);
send_current_mbox:
             /* Process next mailbox command if there is one */
-            if (pmb != NULL) {
-                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-                if (rc == MBX_NOT_FINISHED) {
-                    pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-                    lpfc_mbox_cmpl_put(phba, pmb);
-                    goto send_next_mbox;
-                }
-            }
-
+            do {
+                rc = lpfc_sli_issue_mbox(phba, NULL,
+                            MBX_NOWAIT);
+            } while (rc == MBX_NOT_FINISHED);
+            if (rc != MBX_SUCCESS)
+                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+                        LOG_SLI, "0349 rc should be "
+                        "MBX_SUCCESS");
         }
 
         spin_lock(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 86d05beb00b8..6feaf59b0b1b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -538,7 +538,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
     /* Otherwise, we will perform fabric logo as needed */
     if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
         ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
-        phba->link_state >= LPFC_LINK_UP) {
+        phba->link_state >= LPFC_LINK_UP &&
+        phba->fc_topology != TOPOLOGY_LOOP) {
         if (vport->cfg_enable_da_id) {
             timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
             if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))