Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c      27
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   5
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     38
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      20
5 files changed, 76 insertions, 19 deletions
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 55f984166dbc..d521569e6620 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -962,10 +962,22 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
     if (phba->sli_rev == LPFC_SLI_REV4) {
         evt_dat->immed_dat = phba->ctx_idx;
         phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+        /* Provide warning for over-run of the ct_ctx array */
+        if (phba->ct_ctx[evt_dat->immed_dat].flags &
+            UNSOL_VALID)
+            lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+                    "2717 CT context array entry "
+                    "[%d] over-run: oxid:x%x, "
+                    "sid:x%x\n", phba->ctx_idx,
+                    phba->ct_ctx[
+                        evt_dat->immed_dat].oxid,
+                    phba->ct_ctx[
+                        evt_dat->immed_dat].SID);
         phba->ct_ctx[evt_dat->immed_dat].oxid =
             piocbq->iocb.ulpContext;
         phba->ct_ctx[evt_dat->immed_dat].SID =
             piocbq->iocb.un.rcvels.remoteID;
+        phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
     } else
         evt_dat->immed_dat = piocbq->iocb.ulpContext;

@@ -1323,6 +1335,21 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
         rc = IOCB_ERROR;
         goto issue_ct_rsp_exit;
     }
+
+    /* Check if the ndlp is active */
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+        rc = -IOCB_ERROR;
+        goto issue_ct_rsp_exit;
+    }
+
+    /* get a refernece count so the ndlp doesn't go away while
+     * we respond
+     */
+    if (!lpfc_nlp_get(ndlp)) {
+        rc = -IOCB_ERROR;
+        goto issue_ct_rsp_exit;
+    }
+
     icmd->un.ulpWord[3] = ndlp->nlp_rpi;
     /* The exchange is done, mark the entry as invalid */
     phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 017c933d60ab..f80156246e51 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -6962,6 +6962,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
     uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
     struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
     unsigned long iflag = 0;
+    struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

     spin_lock_irqsave(&phba->hbalock, iflag);
     spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -6974,6 +6975,10 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
             sglq_entry->state = SGL_FREED;
             spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
             spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+            /* Check if TXQ queue needs to be serviced */
+            if (pring->txq_cnt)
+                lpfc_worker_wake_up(phba);
             return;
         }
     }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9fcad20491ef..a610464da16e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -276,7 +276,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
         !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
         !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
         (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
-        (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE))
+        (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
+        (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
         lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

     lpfc_unregister_unused_fcf(phba);
@@ -587,7 +588,7 @@ lpfc_work_done(struct lpfc_hba *phba)
                          (status &
                           HA_RXMASK));
         }
-        if (phba->pport->work_port_events & WORKER_SERVICE_TXQ)
+        if (pring->txq_cnt)
             lpfc_drain_txq(phba);
         /*
          * Turn on Ring interrupts
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7b66b71a14fe..d6089c985c3f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -623,6 +623,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
     unsigned long iflag = 0;
     struct lpfc_iocbq *iocbq;
     int i;
+    struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

     spin_lock_irqsave(&phba->hbalock, iflag);
     spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
@@ -651,6 +652,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
             psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
             psb->exch_busy = 0;
             spin_unlock_irqrestore(&phba->hbalock, iflag);
+            if (pring->txq_cnt)
+                lpfc_worker_wake_up(phba);
             return;

         }
@@ -1322,6 +1325,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
     bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
     pde5->reftag = reftag;

+    /* Endian convertion if necessary for PDE5 */
+    pde5->word0 = cpu_to_le32(pde5->word0);
+    pde5->reftag = cpu_to_le32(pde5->reftag);
+
     /* advance bpl and increment bde count */
     num_bde++;
     bpl++;
@@ -1340,6 +1347,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
     bf_set(pde6_ai, pde6, 1);
     bf_set(pde6_apptagval, pde6, apptagval);

+    /* Endian convertion if necessary for PDE6 */
+    pde6->word0 = cpu_to_le32(pde6->word0);
+    pde6->word1 = cpu_to_le32(pde6->word1);
+    pde6->word2 = cpu_to_le32(pde6->word2);
+
     /* advance bpl and increment bde count */
     num_bde++;
     bpl++;
@@ -1447,6 +1459,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
     bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
     pde5->reftag = reftag;

+    /* Endian convertion if necessary for PDE5 */
+    pde5->word0 = cpu_to_le32(pde5->word0);
+    pde5->reftag = cpu_to_le32(pde5->reftag);
+
     /* advance bpl and increment bde count */
     num_bde++;
     bpl++;
@@ -1463,6 +1479,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
     bf_set(pde6_ai, pde6, 1);
     bf_set(pde6_apptagval, pde6, apptagval);

+    /* Endian convertion if necessary for PDE6 */
+    pde6->word0 = cpu_to_le32(pde6->word0);
+    pde6->word1 = cpu_to_le32(pde6->word1);
+    pde6->word2 = cpu_to_le32(pde6->word2);
+
     /* advance bpl and increment bde count */
     num_bde++;
     bpl++;
@@ -1474,7 +1495,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
     prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
     protgroup_len = sg_dma_len(sgpe);

-
     /* must be integer multiple of the DIF block length */
     BUG_ON(protgroup_len % 8);

@@ -3047,7 +3067,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
     int ret = SUCCESS;
     DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

-    fc_block_scsi_eh(cmnd);
+    ret = fc_block_scsi_eh(cmnd);
+    if (ret)
+        return ret;
     lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
     BUG_ON(!lpfc_cmd);

@@ -3365,7 +3387,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
         return FAILED;
     }
     pnode = rdata->pnode;
-    fc_block_scsi_eh(cmnd);
+    status = fc_block_scsi_eh(cmnd);
+    if (status)
+        return status;

     status = lpfc_chk_tgt_mapped(vport, cmnd);
     if (status == FAILED) {
@@ -3430,7 +3454,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
         return FAILED;
     }
     pnode = rdata->pnode;
-    fc_block_scsi_eh(cmnd);
+    status = fc_block_scsi_eh(cmnd);
+    if (status)
+        return status;

     status = lpfc_chk_tgt_mapped(vport, cmnd);
     if (status == FAILED) {
@@ -3496,7 +3522,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
     fc_host_post_vendor_event(shost, fc_get_event_number(),
         sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

-    fc_block_scsi_eh(cmnd);
+    ret = fc_block_scsi_eh(cmnd);
+    if (ret)
+        return ret;

     /*
      * Since the driver manages a single bus device, reset all
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7ddf52682271..086f95261605 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -601,15 +601,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);

         /* Check if TXQ queue needs to be serviced */
-        if (pring->txq_cnt) {
-            spin_lock_irqsave(
-                &phba->pport->work_port_lock, iflag);
-            phba->pport->work_port_events |=
-                WORKER_SERVICE_TXQ;
+        if (pring->txq_cnt)
             lpfc_worker_wake_up(phba);
-            spin_unlock_irqrestore(
-                &phba->pport->work_port_lock, iflag);
-        }
     }
 }

@@ -12757,6 +12750,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
     LPFC_MBOXQ_t *mb, *nextmb;
     struct lpfc_dmabuf *mp;
     struct lpfc_nodelist *ndlp;
+    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

     spin_lock_irq(&phba->hbalock);
     list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12778,6 +12772,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
         }
         ndlp = (struct lpfc_nodelist *) mb->context2;
         if (ndlp) {
+            spin_lock_irq(shost->host_lock);
+            ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+            spin_unlock_irq(shost->host_lock);
             lpfc_nlp_put(ndlp);
             mb->context2 = NULL;
         }
@@ -12793,6 +12790,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
             ndlp = (struct lpfc_nodelist *) mb->context2;
             if (ndlp) {
+                spin_lock_irq(shost->host_lock);
+                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+                spin_unlock_irq(shost->host_lock);
                 lpfc_nlp_put(ndlp);
                 mb->context2 = NULL;
             }
@@ -12879,10 +12879,6 @@ lpfc_drain_txq(struct lpfc_hba *phba)
         spin_unlock_irqrestore(&phba->hbalock, iflags);
     }

-    spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
-    phba->pport->work_port_events &= ~WORKER_SERVICE_TXQ;
-    spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
-
     /* Cancel all the IOCBs that cannot be issued */
     lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                           IOERR_SLI_ABORTED);
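
The lpfc_scsi.c hunks above change each SCSI error handler from calling fc_block_scsi_eh() and discarding its result to returning early when it reports a non-zero status. The stand-alone C sketch below illustrates only that control-flow change; the struct, the fc_block_scsi_eh() stub, and the status values are simplified stand-ins for the real SCSI midlayer definitions, not the lpfc driver code itself.

    /*
     * Minimal sketch of the "check the fc_block_scsi_eh() return value"
     * pattern.  Everything here is a stand-in so the example compiles
     * outside the kernel tree.
     */
    #include <stdio.h>

    #define SUCCESS      0x2002   /* illustrative status values only */
    #define FAST_IO_FAIL 0x2009

    struct scsi_cmnd {
        int rport_fast_io_failed;   /* stand-in for real rport state */
    };

    /* Stub: returns 0 when the rport is usable again, or FAST_IO_FAIL
     * when the fast_io_fail timer fired while the rport was blocked. */
    static int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
    {
        return cmnd->rport_fast_io_failed ? FAST_IO_FAIL : 0;
    }

    static int abort_handler_sketch(struct scsi_cmnd *cmnd)
    {
        int ret = SUCCESS;

        /* New pattern: propagate the transport's verdict instead of
         * ignoring it and pressing on with the abort. */
        ret = fc_block_scsi_eh(cmnd);
        if (ret)
            return ret;

        /* ... issue the abort and wait for its completion ... */
        return ret;
    }

    int main(void)
    {
        struct scsi_cmnd blocked = { .rport_fast_io_failed = 1 };
        struct scsi_cmnd healthy = { .rport_fast_io_failed = 0 };

        printf("blocked rport -> 0x%x\n", abort_handler_sketch(&blocked));
        printf("healthy rport -> 0x%x\n", abort_handler_sketch(&healthy));
        return 0;
    }

Under these assumptions, a handler that returns the non-zero status lets the error-handling escalation path see FAST_IO_FAIL immediately, rather than spending time aborting or resetting through an rport that the transport has already written off.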