author      James Smart <james.smart@emulex.com>        2010-06-07 15:24:45 -0400
committer   James Bottomley <James.Bottomley@suse.de>   2010-07-27 13:01:34 -0400
commit      2a9bf3d011303d8da64cd5e0e7fdd95f0c143984 (patch)
tree        fbec390538ff91adb0bee7116b3627524134db01    /drivers/scsi/lpfc/lpfc_sli.c
parent      98fc5dd952ecfd3abff7c06e7a55a5eab4dd95b7 (diff)
[SCSI] lpfc 8.3.13: Add TX Queue Support for SLI4 ELS commands.
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
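
In outline, the scheme this patch adds works as follows: when __lpfc_sli_issue_iocb_s4() cannot get an SGL/XRI for an ELS command (or the txq is already non-empty), the IOCB is parked on the ELS ring's txq instead of failing; when __lpfc_sli_release_iocbq_s4() later returns an SGL to the pool, it sets WORKER_SERVICE_TXQ and wakes the worker thread, which calls the new lpfc_drain_txq() to re-issue the deferred commands. The stand-alone C program below models only that defer-and-drain pattern; the names (xri_pool, txq, submit_els, release_xri, drain_txq) and the fixed-size queue are illustrative and are not lpfc symbols.

/*
 * Toy model of the defer-and-drain scheme (illustrative names only).
 */
#include <stdio.h>

#define POOL_SIZE 2
#define TXQ_SIZE  8

static int xri_pool = POOL_SIZE;              /* free exchange resources */
static int txq[TXQ_SIZE], txq_head, txq_cnt;  /* deferred command tags   */

static void issue(int tag)                    /* stand-in for posting a WQE */
{
	printf("issued cmd %d (free XRIs left: %d)\n", tag, xri_pool);
}

static int submit_els(int tag)
{
	/* Like __lpfc_sli_issue_iocb_s4: if commands are already queued,
	 * or no XRI is free, defer to the txq instead of failing. */
	if (txq_cnt || xri_pool == 0) {
		if (txq_cnt == TXQ_SIZE)
			return -1;            /* roughly IOCB_BUSY */
		txq[(txq_head + txq_cnt++) % TXQ_SIZE] = tag;
		printf("deferred cmd %d (txq_cnt=%d)\n", tag, txq_cnt);
		return 0;                     /* roughly IOCB_SUCCESS */
	}
	xri_pool--;
	issue(tag);
	return 0;
}

static void drain_txq(void)                   /* role of lpfc_drain_txq */
{
	while (txq_cnt && xri_pool) {
		int tag = txq[txq_head];

		txq_head = (txq_head + 1) % TXQ_SIZE;
		txq_cnt--;
		xri_pool--;
		issue(tag);
	}
}

static void release_xri(void)
{
	/* Like __lpfc_sli_release_iocbq_s4: returning a resource is the
	 * event that services the txq (the driver defers the drain to its
	 * worker thread via WORKER_SERVICE_TXQ rather than running inline). */
	xri_pool++;
	if (txq_cnt)
		drain_txq();
}

int main(void)
{
	int i;

	for (i = 1; i <= 5; i++)
		submit_els(i);                /* cmds 3-5 are deferred */
	release_xri();                        /* a completion frees an XRI */
	release_xri();
	return 0;
}

In the real driver a deferred command still returns IOCB_SUCCESS to the caller unless SLI_IOCB_RET_IOCB was passed, in which case IOCB_BUSY comes back and the caller handles the retry itself.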
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_sli.c    194
1 files changed, 170 insertions, 24 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 103a5aa4ae81..ae3cb0ab0ae4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -455,6 +455,11 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
         struct lpfc_iocbq * iocbq = NULL;
 
         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+
+        if (iocbq)
+                phba->iocb_cnt++;
+        if (phba->iocb_cnt > phba->iocb_max)
+                phba->iocb_max = phba->iocb_cnt;
         return iocbq;
 }
 
@@ -575,7 +580,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
         struct lpfc_sglq *sglq;
         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
-        unsigned long iflag;
+        unsigned long iflag = 0;
+        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
         if (iocbq->sli4_xritag == NO_XRI)
                 sglq = NULL;
@@ -593,6 +599,17 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         } else {
                 sglq->state = SGL_FREED;
                 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+
+                /* Check if TXQ queue needs to be serviced */
+                if (pring->txq_cnt) {
+                        spin_lock_irqsave(
+                                &phba->pport->work_port_lock, iflag);
+                        phba->pport->work_port_events |=
+                                WORKER_SERVICE_TXQ;
+                        lpfc_worker_wake_up(phba);
+                        spin_unlock_irqrestore(
+                                &phba->pport->work_port_lock, iflag);
+                }
         }
 }
 
@@ -605,6 +622,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
 
+
 /**
  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -642,6 +660,7 @@ static void
 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
         phba->__lpfc_sli_release_iocbq(phba, iocbq);
+        phba->iocb_cnt--;
 }
 
 /**
@@ -872,7 +891,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *piocb)
 {
         list_add_tail(&piocb->list, &pring->txcmplq);
+        piocb->iocb_flag |= LPFC_IO_ON_Q;
         pring->txcmplq_cnt++;
+        if (pring->txcmplq_cnt > pring->txcmplq_max)
+                pring->txcmplq_max = pring->txcmplq_cnt;
+
         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
             (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
             (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -897,7 +920,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  * the txq, the function returns first iocb in the list after
  * removing the iocb from the list, else it returns NULL.
  **/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
         struct lpfc_iocbq *cmd_iocb;
@@ -2150,7 +2173,10 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
                 list_del_init(&cmd_iocb->list);
-                pring->txcmplq_cnt--;
+                if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+                        pring->txcmplq_cnt--;
+                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+                }
                 return cmd_iocb;
         }
 
@@ -2183,7 +2209,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
                 list_del_init(&cmd_iocb->list);
-                pring->txcmplq_cnt--;
+                if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+                        pring->txcmplq_cnt--;
+                }
                 return cmd_iocb;
         }
 
@@ -5578,7 +5607,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  * iocb to the txq when SLI layer cannot submit the command iocb
  * to the ring.
  **/
-static void
+void
 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     struct lpfc_iocbq *piocb)
 {
@@ -6195,7 +6224,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                          struct lpfc_iocbq *piocb, uint32_t flag)
 {
         struct lpfc_sglq *sglq;
-        uint16_t xritag;
         union lpfc_wqe wqe;
         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
@@ -6204,10 +6232,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
                         sglq = NULL;
                 else {
+                        if (pring->txq_cnt) {
+                                if (!(flag & SLI_IOCB_RET_IOCB)) {
+                                        __lpfc_sli_ringtx_put(phba,
+                                                pring, piocb);
+                                        return IOCB_SUCCESS;
+                                } else {
+                                        return IOCB_BUSY;
+                                }
+                        } else {
                         sglq = __lpfc_sli_get_sglq(phba);
-                        if (!sglq)
-                                return IOCB_ERROR;
-                        piocb->sli4_xritag = sglq->sli4_xritag;
+                        if (!sglq) {
+                                if (!(flag & SLI_IOCB_RET_IOCB)) {
+                                        __lpfc_sli_ringtx_put(phba,
+                                                        pring,
+                                                        piocb);
+                                        return IOCB_SUCCESS;
+                                } else
+                                        return IOCB_BUSY;
+                        }
+                        }
                 }
         } else if (piocb->iocb_flag & LPFC_IO_FCP) {
                 sglq = NULL; /* These IO's already have an XRI and
@@ -6223,8 +6267,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
         }
 
         if (sglq) {
-                xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
-                if (xritag != sglq->sli4_xritag)
+                piocb->sli4_xritag = sglq->sli4_xritag;
+
+                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
                         return IOCB_ERROR;
         }
 
@@ -6264,7 +6309,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
  *      IOCB_SUCCESS - Success
  *      IOCB_BUSY - Busy
  **/
-static inline int
+int
 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
                 struct lpfc_iocbq *piocb, uint32_t flag)
 {
@@ -7081,13 +7126,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  */
                 abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
-                lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
-                                "0327 Cannot abort els iocb %p "
-                                "with tag %x context %x, abort status %x, "
-                                "abort code %x\n",
-                                abort_iocb, abort_iotag, abort_context,
-                                irsp->ulpStatus, irsp->un.ulpWord[4]);
-
                 /*
                  * If the iocb is not found in Firmware queue the iocb
                  * might have completed already. Do not free it again.
@@ -7106,6 +7144,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
                         abort_context = abort_iocb->iocb.ulpContext;
         }
+
+        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+                        "0327 Cannot abort els iocb %p "
+                        "with tag %x context %x, abort status %x, "
+                        "abort code %x\n",
+                        abort_iocb, abort_iotag, abort_context,
+                        irsp->ulpStatus, irsp->un.ulpWord[4]);
         /*
          * make sure we have the right iocbq before taking it
          * off the txcmplq and try to call completion routine.
@@ -7123,7 +7168,10 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  * following abort XRI from the HBA.
                  */
                 list_del_init(&abort_iocb->list);
-                pring->txcmplq_cnt--;
+                if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
+                        abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+                        pring->txcmplq_cnt--;
+                }
 
                 /* Firmware could still be in progress of DMAing
                  * payload, so don't free data buffer till after
@@ -7255,8 +7303,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                          "0339 Abort xri x%x, original iotag x%x, "
                          "abort cmd iotag x%x\n",
+                         iabt->un.acxri.abortIoTag,
                          iabt->un.acxri.abortContextTag,
-                         iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
+                         abtsiocbp->iotag);
         retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
 
         if (retval)
@@ -7586,7 +7635,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
         long timeleft, timeout_req = 0;
         int retval = IOCB_SUCCESS;
         uint32_t creg_val;
-
+        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
         /*
          * If the caller has provided a response iocbq buffer, then context2
          * is NULL or its an error.
@@ -7608,7 +7657,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                 readl(phba->HCregaddr); /* flush */
         }
 
-        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
+        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+                                     SLI_IOCB_RET_IOCB);
         if (retval == IOCB_SUCCESS) {
                 timeout_req = timeout * HZ;
                 timeleft = wait_event_timeout(done_q,
@@ -7630,6 +7680,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                                          timeout, (timeleft / jiffies));
                         retval = IOCB_TIMEDOUT;
                 }
+        } else if (retval == IOCB_BUSY) {
+                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                        "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+                        phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+                return retval;
         } else {
                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                 "0332 IOCB wait issue failed, Data x%x\n",
@@ -8775,12 +8830,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 {
         struct lpfc_iocbq *irspiocbq;
         unsigned long iflags;
+        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
 
         /* Get an irspiocbq for later ELS response processing use */
         irspiocbq = lpfc_sli_get_iocbq(phba);
         if (!irspiocbq) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                        "0387 Failed to allocate an iocbq\n");
+                        "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+                        "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+                        pring->txq_cnt, phba->iocb_cnt,
+                        phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
+                        phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
                 return false;
         }
 
@@ -12695,3 +12755,89 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
         spin_unlock_irq(&phba->hbalock);
 }
 
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempt to submit IOCBs on the txq
+ * to the adapter.  For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because the there
+ * are no SGLs.  This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+        LIST_HEAD(completions);
+        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+        struct lpfc_iocbq *piocbq = 0;
+        unsigned long iflags = 0;
+        char *fail_msg = NULL;
+        struct lpfc_sglq *sglq;
+        union lpfc_wqe wqe;
+
+        spin_lock_irqsave(&phba->hbalock, iflags);
+        if (pring->txq_cnt > pring->txq_max)
+                pring->txq_max = pring->txq_cnt;
+
+        spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+        while (pring->txq_cnt) {
+                spin_lock_irqsave(&phba->hbalock, iflags);
+
+                sglq = __lpfc_sli_get_sglq(phba);
+                if (!sglq) {
+                        spin_unlock_irqrestore(&phba->hbalock, iflags);
+                        break;
+                } else {
+                        piocbq = lpfc_sli_ringtx_get(phba, pring);
+                        if (!piocbq) {
+                                /* The txq_cnt out of sync. This should
+                                 * never happen
+                                 */
+                                sglq = __lpfc_clear_active_sglq(phba,
+                                                 sglq->sli4_xritag);
+                                spin_unlock_irqrestore(&phba->hbalock, iflags);
+                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                        "2823 txq empty and txq_cnt is %d\n ",
+                                        pring->txq_cnt);
+                                break;
+                        }
+                }
+
+                /* The xri and iocb resources secured,
+                 * attempt to issue request
+                 */
+                piocbq->sli4_xritag = sglq->sli4_xritag;
+                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+                        fail_msg = "to convert bpl to sgl";
+                else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+                        fail_msg = "to convert iocb to wqe";
+                else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+                        fail_msg = " - Wq is full";
+                else
+                        lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+                if (fail_msg) {
+                        /* Failed means we can't issue and need to cancel */
+                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                        "2822 IOCB failed %s iotag 0x%x "
+                                        "xri 0x%x\n",
+                                        fail_msg,
+                                        piocbq->iotag, piocbq->sli4_xritag);
+                        list_add_tail(&piocbq->list, &completions);
+                }
+                spin_unlock_irqrestore(&phba->hbalock, iflags);
+        }
+
+        spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+        phba->pport->work_port_events &= ~WORKER_SERVICE_TXQ;
+        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+        /* Cancel all the IOCBs that cannot be issued */
+        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+                                IOERR_SLI_ABORTED);
+
+        return pring->txq_cnt;
+}
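
One piece of the scheme is not visible in this file's diff: the consumer of the WORKER_SERVICE_TXQ bit. That worker-thread hook lives in the rest of the patch (the diffstat above is limited to lpfc_sli.c), so the dispatch below is only a hedged sketch of what such a hook plausibly looks like, built from symbols this diff does show; lpfc_drain_txq() clears the event bit itself, so the worker only needs to test it and call the drain.

/* Hypothetical worker-side dispatch (not taken from this diff): the
 * producer side above sets WORKER_SERVICE_TXQ under work_port_lock and
 * calls lpfc_worker_wake_up(); a hook along these lines would let the
 * worker thread run the drain.
 */
static void lpfc_service_txq_event(struct lpfc_hba *phba)   /* hypothetical helper */
{
	uint32_t work_port_events;

	spin_lock_irq(&phba->pport->work_port_lock);
	work_port_events = phba->pport->work_port_events;
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (work_port_events & WORKER_SERVICE_TXQ)
		lpfc_drain_txq(phba);   /* re-issues deferred ELS IOCBs, clears the bit */
}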