author     James Smart <James.Smart@Emulex.Com>    2008-04-07 10:15:56 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2008-04-10 08:52:11 -0400
commit     58da1ffb2b1234e9c6c75013a649c659cc38ebd4 (patch)
tree       f159b38ff5c830e10eb90918ef5b42ae71645daa /drivers/scsi/lpfc/lpfc_scsi.c
parent     b35c07d00751c3d554dd6e582b661ac2e8ffc162 (diff)
[SCSI] lpfc 8.2.6 : Multiple discovery fixes
Multiple Discovery Fixes:
- Fix race on discovery due to link events coinciding with vport_delete.
- Use NLP_FABRIC state to filter out switch-based pseudo initiators that
  reuse the same WWNs.
- Correct erroneous setting of DID=0 in lpfc_matchdid().
- Correct extra reference count that was in the lookup path for the
  remoteid from an unsolicited ELS.
- Correct double-free bug in els abort path.
- Correct FDMI server discovery logic for switches that return a WWN of 0.
- Fix bugs in ndlp mgmt when a node changes address.
- Correct bug that did not delete RSCNs for vports upon link transitions.
- Fix "0216 Link event during NS query" error which pops up when vports are
  swapped to different switch ports.
- Add sanity checks on ndlp structures (see the guard-pattern sketch after
  this message).
- Fix devloss log message to dump WWN correctly.
- Hold off mgmt commands that were interfering with discovery mailbox cmds.
- Remove unnecessary FC_ESTABLISH_LINK logic.
- Correct some race conditions in the worker thread, resulting in devloss:
  - Clear the work_port_events field before handling the work port events.
  - Clear the deferred ring event before handling a deferred ring event.
  - Hold the hba lock when waking up the work thread.
- Send an acc for the rscn even when we aren't going to handle it.
- Fix locking behavior that was not properly protecting the ACTIVE flag,
  thus allowing mailbox command order to shift.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
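The common thread through most of these lpfc_scsi.c changes is the node-activity guard: every place that dereferences rdata->pnode now also verifies NLP_CHK_NODE_ACT(), so a node being torn down by a link or vport event is never used. Below is a minimal, standalone sketch of that guard pattern; struct node, node_is_active() and prep_command() are simplified stand-ins invented for illustration, not the driver's struct lpfc_nodelist, NLP_CHK_NODE_ACT() macro or lpfc_scsi_prep_cmnd().

#include <stdbool.h>

/* Simplified stand-in for the driver's node bookkeeping; the real code
 * keeps this state in struct lpfc_nodelist and tests it with the
 * NLP_CHK_NODE_ACT() macro. */
struct node {
	int state;		/* e.g. mapped vs. unmapped */
	bool active;		/* cleared while the node is being torn down */
};

#define NODE_STATE_MAPPED 1

/* Hypothetical equivalent of the NULL-pointer + NLP_CHK_NODE_ACT() test
 * added throughout this patch. */
static bool node_is_active(const struct node *n)
{
	return n && n->active;
}

/* The recurring pattern: bail out early unless the node exists, is still
 * active, and is in the mapped state; only then touch its fields. */
static int prep_command(struct node *n)
{
	if (!node_is_active(n) || n->state != NODE_STATE_MAPPED)
		return -1;	/* caller fails or requeues the command */

	/* ... safe to dereference n and build the FCP command here ... */
	return 0;
}

int main(void)
{
	struct node good  = { .state = NODE_STATE_MAPPED, .active = true  };
	struct node stale = { .state = NODE_STATE_MAPPED, .active = false };

	/* The mapped, active node is accepted; the stale one is rejected. */
	return (prep_command(&good) == 0 && prep_command(&stale) != 0) ? 0 : 1;
}

The early return mirrors the hunk in lpfc_scsi_prep_cmnd() in the diff below, where the command is simply not built when the node pointer is missing or the node is no longer active.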
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 70255c11d3ad..6df8bc003a88 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -578,14 +578,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
 			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
 				break;
 			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
 		}
 
-		if ((pnode == NULL )
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
 			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
 	} else {
@@ -626,7 +626,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	if (!result)
 		lpfc_rampup_queue_depth(vport, sdev);
 
-	if (!result && pnode != NULL &&
+	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
 	   ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
 	   ((jiffies - pnode->last_q_full_time) >
@@ -654,7 +654,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * Check for queue full. If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+	    NLP_CHK_NODE_ACT(pnode)) {
 		pnode->last_q_full_time = jiffies;
 
 		shost_for_each_device(tmp_sdev, sdev->host) {
@@ -704,6 +705,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int datadir = scsi_cmnd->sc_data_direction;
 	char tag[2];
 
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
 	/* clear task management bits */
 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
@@ -785,9 +789,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
-	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
 		return 0;
-	}
 
 	piocbq = &(lpfc_cmd->cur_iocbq);
 	piocbq->vport = vport;
@@ -842,7 +846,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
-	if (!rdata->pnode)
+	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 
 	lpfc_cmd->rdata = rdata;
@@ -959,7 +963,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	if (!ndlp) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
@@ -1146,7 +1150,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	 * target is rediscovered or devloss timeout expires.
 	 */
 	while (1) {
-		if (!pnode)
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 			goto out;
 
 		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
@@ -1162,7 +1166,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 				goto out;
 			}
 			pnode = rdata->pnode;
-			if (!pnode)
+			if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 				goto out;
 		}
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)