Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 523
1 file changed, 245 insertions(+), 278 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e74e224fd77c..508710001ed6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,28 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 65 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 66} lpfc_iocb_type;
67 67
68struct lpfc_iocbq *
69lpfc_sli_get_iocbq(struct lpfc_hba * phba)
70{
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL;
73
74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
75 return iocbq;
76}
77
78void
79lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
80{
81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
82
83 /*
84 * Clean all volatile data fields, preserve iotag and node struct.
85 */
86 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88}
89
68/* 90/*
69 * Translate the iocb command to an iocb command type used to decide the final 91 * Translate the iocb command to an iocb command type used to decide the final
70 * disposition of each completed IOCB. 92 * disposition of each completed IOCB.
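The two helpers added above replace the open-coded list_remove_head()/list_add_tail() calls used throughout the rest of this patch. Below is a minimal userspace sketch of the same free-list pattern; every name in it (fake_iocbq, pool_get, pool_release, ...) is invented for illustration and it is not the kernel code. The driver computes the preserved prefix with the classic NULL-pointer cast; offsetof() expresses the same idea.

/*
 * Illustration only: pop an object from a free list, and on release zero
 * everything after a preserved prefix (the linkage and the tag) before
 * putting the object back on the list.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_iocbq {
	struct fake_iocbq *next;    /* stands in for the list_head linkage */
	unsigned short iotag;       /* preserved across release, like iocbq->iotag */
	char payload[32];           /* "volatile" fields, cleared on release */
};

static struct fake_iocbq *free_list;

static struct fake_iocbq *pool_get(void)
{
	struct fake_iocbq *q = free_list;

	if (q)
		free_list = q->next;
	return q;                   /* may be NULL, like lpfc_sli_get_iocbq() */
}

static void pool_release(struct fake_iocbq *q)
{
	/* Clear everything from 'payload' onward, keep next and iotag intact. */
	size_t start_clean = offsetof(struct fake_iocbq, payload);

	memset((char *)q + start_clean, 0, sizeof(*q) - start_clean);
	q->next = free_list;
	free_list = q;
}

int main(void)
{
	struct fake_iocbq a = { .iotag = 7 };

	pool_release(&a);           /* seed the pool with one object */
	printf("got iotag %hu\n", pool_get()->iotag);
	return 0;
}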
@@ -265,41 +287,69 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
265 return iocb; 287 return iocb;
266} 288}
267 289
268static uint32_t 290uint16_t
269lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 291lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
270{ 292{
271 uint32_t search_start; 293 struct lpfc_iocbq ** new_arr;
294 struct lpfc_iocbq ** old_arr;
295 size_t new_len;
296 struct lpfc_sli *psli = &phba->sli;
297 uint16_t iotag;
272 298
273 if (pring->fast_lookup == NULL) { 299 spin_lock_irq(phba->host->host_lock);
274 pring->iotag_ctr++; 300 iotag = psli->last_iotag;
275 if (pring->iotag_ctr >= pring->iotag_max) 301 if(++iotag < psli->iocbq_lookup_len) {
276 pring->iotag_ctr = 1; 302 psli->last_iotag = iotag;
277 return pring->iotag_ctr; 303 psli->iocbq_lookup[iotag] = iocbq;
304 spin_unlock_irq(phba->host->host_lock);
305 iocbq->iotag = iotag;
306 return iotag;
307 }
308 else if (psli->iocbq_lookup_len < (0xffff
309 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
310 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
311 spin_unlock_irq(phba->host->host_lock);
312 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
313 GFP_KERNEL);
314 if (new_arr) {
315 memset((char *)new_arr, 0,
316 new_len * sizeof (struct lpfc_iocbq *));
317 spin_lock_irq(phba->host->host_lock);
318 old_arr = psli->iocbq_lookup;
319 if (new_len <= psli->iocbq_lookup_len) {
320 /* highly improbable case */
321 kfree(new_arr);
322 iotag = psli->last_iotag;
323 if(++iotag < psli->iocbq_lookup_len) {
324 psli->last_iotag = iotag;
325 psli->iocbq_lookup[iotag] = iocbq;
326 spin_unlock_irq(phba->host->host_lock);
327 iocbq->iotag = iotag;
328 return iotag;
329 }
330 spin_unlock_irq(phba->host->host_lock);
331 return 0;
332 }
333 if (psli->iocbq_lookup)
334 memcpy(new_arr, old_arr,
335 ((psli->last_iotag + 1) *
336 sizeof (struct lpfc_iocbq *)));
337 psli->iocbq_lookup = new_arr;
338 psli->iocbq_lookup_len = new_len;
339 psli->last_iotag = iotag;
340 psli->iocbq_lookup[iotag] = iocbq;
341 spin_unlock_irq(phba->host->host_lock);
342 iocbq->iotag = iotag;
343 kfree(old_arr);
344 return iotag;
345 }
278 } 346 }
279 347
280 search_start = pring->iotag_ctr; 348 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
281 349 "%d:0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
282 do { 350 phba->brd_no, psli->last_iotag);
283 pring->iotag_ctr++;
284 if (pring->iotag_ctr >= pring->fast_iotag)
285 pring->iotag_ctr = 1;
286
287 if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
288 return pring->iotag_ctr;
289
290 } while (pring->iotag_ctr != search_start);
291 351
292 /* 352 return 0;
293 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
294 */
295 lpfc_printf_log(phba,
296 KERN_ERR,
297 LOG_SLI,
298 "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
299 phba->brd_no,
300 pring->ringno,
301 pring->fast_iotag);
302 return (0);
303} 353}
304 354
305static void 355static void
@@ -307,10 +357,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
307 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 357 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
308{ 358{
309 /* 359 /*
310 * Allocate and set up an iotag 360 * Set up an iotag
311 */ 361 */
312 nextiocb->iocb.ulpIoTag = 362 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
313 lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);
314 363
315 /* 364 /*
316 * Issue iocb command to adapter 365 * Issue iocb command to adapter
@@ -326,16 +375,15 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
326 */ 375 */
327 if (nextiocb->iocb_cmpl) 376 if (nextiocb->iocb_cmpl)
328 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 377 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
329 else { 378 else
330 list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list); 379 lpfc_sli_release_iocbq(phba, nextiocb);
331 }
332 380
333 /* 381 /*
334 * Let the HBA know what IOCB slot will be the next one the 382 * Let the HBA know what IOCB slot will be the next one the
335 * driver will put a command into. 383 * driver will put a command into.
336 */ 384 */
337 pring->cmdidx = pring->next_cmdidx; 385 pring->cmdidx = pring->next_cmdidx;
338 writeb(pring->cmdidx, phba->MBslimaddr 386 writel(pring->cmdidx, phba->MBslimaddr
339 + (SLIMOFF + (pring->ringno * 2)) * 4); 387 + (SLIMOFF + (pring->ringno * 2)) * 4);
340} 388}
341 389
@@ -752,80 +800,28 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
752} 800}
753 801
754static struct lpfc_iocbq * 802static struct lpfc_iocbq *
755lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring, 803lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
756 struct lpfc_iocbq * prspiocb) 804 struct lpfc_sli_ring * pring,
757{ 805 struct lpfc_iocbq * prspiocb)
758 IOCB_t *icmd = NULL;
759 IOCB_t *irsp = NULL;
760 struct lpfc_iocbq *cmd_iocb;
761 struct lpfc_iocbq *iocb, *next_iocb;
762 uint16_t iotag;
763
764 irsp = &prspiocb->iocb;
765 iotag = irsp->ulpIoTag;
766 cmd_iocb = NULL;
767
768 /* Search through txcmpl from the begining */
769 list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
770 icmd = &iocb->iocb;
771 if (iotag == icmd->ulpIoTag) {
772 /* Found a match. */
773 cmd_iocb = iocb;
774 list_del(&iocb->list);
775 pring->txcmplq_cnt--;
776 break;
777 }
778 }
779
780 return (cmd_iocb);
781}
782
783static struct lpfc_iocbq *
784lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
785 struct lpfc_sli_ring * pring,
786 struct lpfc_iocbq * prspiocb)
787{ 806{
788 IOCB_t *irsp = NULL;
789 struct lpfc_iocbq *cmd_iocb = NULL; 807 struct lpfc_iocbq *cmd_iocb = NULL;
790 uint16_t iotag; 808 uint16_t iotag;
791 809
792 if (unlikely(pring->fast_lookup == NULL)) 810 iotag = prspiocb->iocb.ulpIoTag;
793 return NULL; 811
794 812 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
795 /* Use fast lookup based on iotag for completion */ 813 cmd_iocb = phba->sli.iocbq_lookup[iotag];
796 irsp = &prspiocb->iocb; 814 list_del(&cmd_iocb->list);
797 iotag = irsp->ulpIoTag; 815 pring->txcmplq_cnt--;
798 if (iotag < pring->fast_iotag) { 816 return cmd_iocb;
799 cmd_iocb = *(pring->fast_lookup + iotag);
800 *(pring->fast_lookup + iotag) = NULL;
801 if (cmd_iocb) {
802 list_del(&cmd_iocb->list);
803 pring->txcmplq_cnt--;
804 return cmd_iocb;
805 } else {
806 /*
807 * This is clearly an error. A ring that uses iotags
808 * should never have a interrupt for a completion that
809 * is not on the ring. Return NULL and log a error.
810 */
811 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
812 "%d:0327 Rsp ring %d error - command "
813 "completion for iotag x%x not found\n",
814 phba->brd_no, pring->ringno, iotag);
815 return NULL;
816 }
817 } 817 }
818 818
819 /*
820 * Rsp ring <ringno> get: iotag <iotag> greater then
821 * configured max <fast_iotag> wd0 <irsp>. This is an
822 * error. Just return NULL.
823 */
824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 819 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
825 "%d:0317 Rsp ring %d get: iotag x%x greater then " 820 "%d:0317 iotag x%x is out off "
826 "configured max x%x wd0 x%x\n", 821 "range: max iotag x%x wd0 x%x\n",
827 phba->brd_no, pring->ringno, iotag, pring->fast_iotag, 822 phba->brd_no, iotag,
828 *(((uint32_t *) irsp) + 7)); 823 phba->sli.last_iotag,
824 *(((uint32_t *) &prspiocb->iocb) + 7));
829 return NULL; 825 return NULL;
830} 826}
831 827
@@ -839,7 +835,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
839 835
840 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 836 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
841 spin_lock_irqsave(phba->host->host_lock, iflag); 837 spin_lock_irqsave(phba->host->host_lock, iflag);
842 cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq); 838 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
843 if (cmdiocbp) { 839 if (cmdiocbp) {
844 if (cmdiocbp->iocb_cmpl) { 840 if (cmdiocbp->iocb_cmpl) {
845 /* 841 /*
@@ -853,17 +849,13 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
853 spin_lock_irqsave(phba->host->host_lock, iflag); 849 spin_lock_irqsave(phba->host->host_lock, iflag);
854 } 850 }
855 else { 851 else {
856 if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
857 rc = 0;
858
859 spin_unlock_irqrestore(phba->host->host_lock, 852 spin_unlock_irqrestore(phba->host->host_lock,
860 iflag); 853 iflag);
861 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 854 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
862 spin_lock_irqsave(phba->host->host_lock, iflag); 855 spin_lock_irqsave(phba->host->host_lock, iflag);
863 } 856 }
864 } else { 857 } else
865 list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list); 858 lpfc_sli_release_iocbq(phba, cmdiocbp);
866 }
867 } else { 859 } else {
868 /* 860 /*
869 * Unknown initiating command based on the response iotag. 861 * Unknown initiating command based on the response iotag.
@@ -889,6 +881,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
889 saveq->iocb.ulpContext); 881 saveq->iocb.ulpContext);
890 } 882 }
891 } 883 }
884
892 spin_unlock_irqrestore(phba->host->host_lock, iflag); 885 spin_unlock_irqrestore(phba->host->host_lock, iflag);
893 return rc; 886 return rc;
894} 887}
@@ -953,7 +946,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
953 * structure. The copy involves a byte-swap since the 946 * structure. The copy involves a byte-swap since the
954 * network byte order and pci byte orders are different. 947 * network byte order and pci byte orders are different.
955 */ 948 */
956 entry = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 949 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
957 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 950 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
958 (uint32_t *) &rspiocbq.iocb, 951 (uint32_t *) &rspiocbq.iocb,
959 sizeof (IOCB_t)); 952 sizeof (IOCB_t));
@@ -990,9 +983,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
990 break; 983 break;
991 } 984 }
992 985
993 cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba, 986 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
994 pring, 987 &rspiocbq);
995 &rspiocbq);
996 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 988 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
997 spin_unlock_irqrestore( 989 spin_unlock_irqrestore(
998 phba->host->host_lock, iflag); 990 phba->host->host_lock, iflag);
@@ -1033,7 +1025,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1033 1025
1034 to_slim = phba->MBslimaddr + 1026 to_slim = phba->MBslimaddr +
1035 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1027 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1036 writeb(pring->rspidx, to_slim); 1028 writel(pring->rspidx, to_slim);
1037 1029
1038 if (pring->rspidx == portRspPut) 1030 if (pring->rspidx == portRspPut)
1039 portRspPut = le32_to_cpu(pgp->rspPutInx); 1031 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1073,7 +1065,6 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1073 struct lpfc_iocbq *next_iocb; 1065 struct lpfc_iocbq *next_iocb;
1074 struct lpfc_iocbq *cmdiocbp; 1066 struct lpfc_iocbq *cmdiocbp;
1075 struct lpfc_iocbq *saveq; 1067 struct lpfc_iocbq *saveq;
1076 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
1077 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1068 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1078 uint8_t iocb_cmd_type; 1069 uint8_t iocb_cmd_type;
1079 lpfc_iocb_type type; 1070 lpfc_iocb_type type;
@@ -1115,7 +1106,6 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1115 } 1106 }
1116 1107
1117 rmb(); 1108 rmb();
1118 lpfc_iocb_list = &phba->lpfc_iocb_list;
1119 while (pring->rspidx != portRspPut) { 1109 while (pring->rspidx != portRspPut) {
1120 /* 1110 /*
1121 * Build a completion list and call the appropriate handler. 1111 * Build a completion list and call the appropriate handler.
@@ -1131,8 +1121,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1131 * received. 1121 * received.
1132 */ 1122 */
1133 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1123 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1134 list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq, 1124 rspiocbp = lpfc_sli_get_iocbq(phba);
1135 list);
1136 if (rspiocbp == NULL) { 1125 if (rspiocbp == NULL) {
1137 printk(KERN_ERR "%s: out of buffers! Failing " 1126 printk(KERN_ERR "%s: out of buffers! Failing "
1138 "completion.\n", __FUNCTION__); 1127 "completion.\n", __FUNCTION__);
@@ -1147,7 +1136,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1147 1136
1148 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1137 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1149 + 1) * 4; 1138 + 1) * 4;
1150 writeb(pring->rspidx, to_slim); 1139 writel(pring->rspidx, to_slim);
1151 1140
1152 if (list_empty(&(pring->iocb_continueq))) { 1141 if (list_empty(&(pring->iocb_continueq))) {
1153 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1142 list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1213,8 +1202,8 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1213 } else if (type == LPFC_ABORT_IOCB) { 1202 } else if (type == LPFC_ABORT_IOCB) {
1214 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1203 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1215 ((cmdiocbp = 1204 ((cmdiocbp =
1216 lpfc_sli_txcmpl_ring_search_slow(pring, 1205 lpfc_sli_iocbq_lookup(phba, pring,
1217 saveq)))) { 1206 saveq)))) {
1218 /* Call the specified completion 1207 /* Call the specified completion
1219 routine */ 1208 routine */
1220 if (cmdiocbp->iocb_cmpl) { 1209 if (cmdiocbp->iocb_cmpl) {
@@ -1226,10 +1215,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1226 spin_lock_irqsave( 1215 spin_lock_irqsave(
1227 phba->host->host_lock, 1216 phba->host->host_lock,
1228 iflag); 1217 iflag);
1229 } else { 1218 } else
1230 list_add_tail(&cmdiocbp->list, 1219 lpfc_sli_release_iocbq(phba,
1231 lpfc_iocb_list); 1220 cmdiocbp);
1232 }
1233 } 1221 }
1234 } else if (type == LPFC_UNKNOWN_IOCB) { 1222 } else if (type == LPFC_UNKNOWN_IOCB) {
1235 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1223 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1264,12 +1252,12 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1264 next_iocb, 1252 next_iocb,
1265 &saveq->list, 1253 &saveq->list,
1266 list) { 1254 list) {
1267 list_add_tail(&rspiocbp->list, 1255 lpfc_sli_release_iocbq(phba,
1268 lpfc_iocb_list); 1256 rspiocbp);
1269 } 1257 }
1270 } 1258 }
1271 1259
1272 list_add_tail(&saveq->list, lpfc_iocb_list); 1260 lpfc_sli_release_iocbq(phba, saveq);
1273 } 1261 }
1274 } 1262 }
1275 1263
@@ -1314,7 +1302,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1314 struct lpfc_iocbq *iocb, *next_iocb; 1302 struct lpfc_iocbq *iocb, *next_iocb;
1315 IOCB_t *icmd = NULL, *cmd = NULL; 1303 IOCB_t *icmd = NULL, *cmd = NULL;
1316 int errcnt; 1304 int errcnt;
1317 uint16_t iotag;
1318 1305
1319 errcnt = 0; 1306 errcnt = 0;
1320 1307
@@ -1331,9 +1318,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1331 spin_unlock_irq(phba->host->host_lock); 1318 spin_unlock_irq(phba->host->host_lock);
1332 (iocb->iocb_cmpl) (phba, iocb, iocb); 1319 (iocb->iocb_cmpl) (phba, iocb, iocb);
1333 spin_lock_irq(phba->host->host_lock); 1320 spin_lock_irq(phba->host->host_lock);
1334 } else { 1321 } else
1335 list_add_tail(&iocb->list, &phba->lpfc_iocb_list); 1322 lpfc_sli_release_iocbq(phba, iocb);
1336 }
1337 } 1323 }
1338 pring->txq_cnt = 0; 1324 pring->txq_cnt = 0;
1339 INIT_LIST_HEAD(&(pring->txq)); 1325 INIT_LIST_HEAD(&(pring->txq));
@@ -1343,13 +1329,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1343 cmd = &iocb->iocb; 1329 cmd = &iocb->iocb;
1344 1330
1345 /* 1331 /*
1346 * Imediate abort of IOCB, clear fast_lookup entry, 1332 * Immediate abort of IOCB, dequeue and call compl
1347 * if any, deque and call compl
1348 */ 1333 */
1349 iotag = cmd->ulpIoTag;
1350 if (iotag && pring->fast_lookup &&
1351 (iotag < pring->fast_iotag))
1352 pring->fast_lookup[iotag] = NULL;
1353 1334
1354 list_del_init(&iocb->list); 1335 list_del_init(&iocb->list);
1355 pring->txcmplq_cnt--; 1336 pring->txcmplq_cnt--;
@@ -1360,9 +1341,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1360 spin_unlock_irq(phba->host->host_lock); 1341 spin_unlock_irq(phba->host->host_lock);
1361 (iocb->iocb_cmpl) (phba, iocb, iocb); 1342 (iocb->iocb_cmpl) (phba, iocb, iocb);
1362 spin_lock_irq(phba->host->host_lock); 1343 spin_lock_irq(phba->host->host_lock);
1363 } else { 1344 } else
1364 list_add_tail(&iocb->list, &phba->lpfc_iocb_list); 1345 lpfc_sli_release_iocbq(phba, iocb);
1365 }
1366 } 1346 }
1367 1347
1368 INIT_LIST_HEAD(&pring->txcmplq); 1348 INIT_LIST_HEAD(&pring->txcmplq);
@@ -2147,6 +2127,10 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2147 psli->next_ring = LPFC_FCP_NEXT_RING; 2127 psli->next_ring = LPFC_FCP_NEXT_RING;
2148 psli->ip_ring = LPFC_IP_RING; 2128 psli->ip_ring = LPFC_IP_RING;
2149 2129
2130 psli->iocbq_lookup = NULL;
2131 psli->iocbq_lookup_len = 0;
2132 psli->last_iotag = 0;
2133
2150 for (i = 0; i < psli->num_rings; i++) { 2134 for (i = 0; i < psli->num_rings; i++) {
2151 pring = &psli->ring[i]; 2135 pring = &psli->ring[i];
2152 switch (i) { 2136 switch (i) {
@@ -2222,7 +2206,7 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2222{ 2206{
2223 struct lpfc_sli *psli; 2207 struct lpfc_sli *psli;
2224 struct lpfc_sli_ring *pring; 2208 struct lpfc_sli_ring *pring;
2225 int i, cnt; 2209 int i;
2226 2210
2227 psli = &phba->sli; 2211 psli = &phba->sli;
2228 spin_lock_irq(phba->host->host_lock); 2212 spin_lock_irq(phba->host->host_lock);
@@ -2238,19 +2222,6 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2238 INIT_LIST_HEAD(&pring->txcmplq); 2222 INIT_LIST_HEAD(&pring->txcmplq);
2239 INIT_LIST_HEAD(&pring->iocb_continueq); 2223 INIT_LIST_HEAD(&pring->iocb_continueq);
2240 INIT_LIST_HEAD(&pring->postbufq); 2224 INIT_LIST_HEAD(&pring->postbufq);
2241 cnt = pring->fast_iotag;
2242 spin_unlock_irq(phba->host->host_lock);
2243 if (cnt) {
2244 pring->fast_lookup =
2245 kmalloc(cnt * sizeof (struct lpfc_iocbq *),
2246 GFP_KERNEL);
2247 if (pring->fast_lookup == 0) {
2248 return (0);
2249 }
2250 memset((char *)pring->fast_lookup, 0,
2251 cnt * sizeof (struct lpfc_iocbq *));
2252 }
2253 spin_lock_irq(phba->host->host_lock);
2254 } 2225 }
2255 spin_unlock_irq(phba->host->host_lock); 2226 spin_unlock_irq(phba->host->host_lock);
2256 return (1); 2227 return (1);
@@ -2292,10 +2263,8 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2292 flags); 2263 flags);
2293 (iocb->iocb_cmpl) (phba, iocb, iocb); 2264 (iocb->iocb_cmpl) (phba, iocb, iocb);
2294 spin_lock_irqsave(phba->host->host_lock, flags); 2265 spin_lock_irqsave(phba->host->host_lock, flags);
2295 } else { 2266 } else
2296 list_add_tail(&iocb->list, 2267 lpfc_sli_release_iocbq(phba, iocb);
2297 &phba->lpfc_iocb_list);
2298 }
2299 } 2268 }
2300 2269
2301 INIT_LIST_HEAD(&(pring->txq)); 2270 INIT_LIST_HEAD(&(pring->txq));
@@ -2436,7 +2405,7 @@ lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2436 kfree(buf_ptr); 2405 kfree(buf_ptr);
2437 } 2406 }
2438 2407
2439 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 2408 lpfc_sli_release_iocbq(phba, cmdiocb);
2440 return; 2409 return;
2441} 2410}
2442 2411
@@ -2445,16 +2414,14 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2445 struct lpfc_sli_ring * pring, 2414 struct lpfc_sli_ring * pring,
2446 struct lpfc_iocbq * cmdiocb) 2415 struct lpfc_iocbq * cmdiocb)
2447{ 2416{
2448 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 2417 struct lpfc_iocbq *abtsiocbp;
2449 struct lpfc_iocbq *abtsiocbp = NULL;
2450 IOCB_t *icmd = NULL; 2418 IOCB_t *icmd = NULL;
2451 IOCB_t *iabt = NULL; 2419 IOCB_t *iabt = NULL;
2452 2420
2453 /* issue ABTS for this IOCB based on iotag */ 2421 /* issue ABTS for this IOCB based on iotag */
2454 list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list); 2422 abtsiocbp = lpfc_sli_get_iocbq(phba);
2455 if (abtsiocbp == NULL) 2423 if (abtsiocbp == NULL)
2456 return 0; 2424 return 0;
2457 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2458 2425
2459 iabt = &abtsiocbp->iocb; 2426 iabt = &abtsiocbp->iocb;
2460 icmd = &cmdiocb->iocb; 2427 icmd = &cmdiocb->iocb;
@@ -2473,7 +2440,7 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2473 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl; 2440 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2474 break; 2441 break;
2475 default: 2442 default:
2476 list_add_tail(&abtsiocbp->list, lpfc_iocb_list); 2443 lpfc_sli_release_iocbq(phba, abtsiocbp);
2477 return 0; 2444 return 0;
2478 } 2445 }
2479 2446
@@ -2485,7 +2452,7 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2485 iabt->ulpCommand = CMD_ABORT_MXRI64_CN; 2452 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2486 2453
2487 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) { 2454 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2488 list_add_tail(&abtsiocbp->list, lpfc_iocb_list); 2455 lpfc_sli_release_iocbq(phba, abtsiocbp);
2489 return 0; 2456 return 0;
2490 } 2457 }
2491 2458
@@ -2493,28 +2460,37 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2493} 2460}
2494 2461
2495static int 2462static int
2496lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id, 2463lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2497 uint64_t lun_id, struct lpfc_iocbq *iocb, 2464 uint64_t lun_id, uint32_t ctx,
2498 uint32_t ctx, lpfc_ctx_cmd ctx_cmd) 2465 lpfc_ctx_cmd ctx_cmd)
2499{ 2466{
2467 struct lpfc_scsi_buf *lpfc_cmd;
2468 struct scsi_cmnd *cmnd;
2500 int rc = 1; 2469 int rc = 1;
2501 2470
2502 if (lpfc_cmd == NULL) 2471 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
2472 return rc;
2473
2474 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2475 cmnd = lpfc_cmd->pCmd;
2476
2477 if (cmnd == NULL)
2503 return rc; 2478 return rc;
2504 2479
2505 switch (ctx_cmd) { 2480 switch (ctx_cmd) {
2506 case LPFC_CTX_LUN: 2481 case LPFC_CTX_LUN:
2507 if ((lpfc_cmd->pCmd->device->id == tgt_id) && 2482 if ((cmnd->device->id == tgt_id) &&
2508 (lpfc_cmd->pCmd->device->lun == lun_id)) 2483 (cmnd->device->lun == lun_id))
2509 rc = 0; 2484 rc = 0;
2510 break; 2485 break;
2511 case LPFC_CTX_TGT: 2486 case LPFC_CTX_TGT:
2512 if (lpfc_cmd->pCmd->device->id == tgt_id) 2487 if (cmnd->device->id == tgt_id)
2513 rc = 0; 2488 rc = 0;
2514 break; 2489 break;
2515 case LPFC_CTX_CTX: 2490 case LPFC_CTX_CTX:
2516 if (iocb->iocb.ulpContext == ctx) 2491 if (iocbq->iocb.ulpContext == ctx)
2517 rc = 0; 2492 rc = 0;
2493 break;
2518 case LPFC_CTX_HOST: 2494 case LPFC_CTX_HOST:
2519 rc = 0; 2495 rc = 0;
2520 break; 2496 break;
@@ -2531,30 +2507,17 @@ int
2531lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2507lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2532 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 2508 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2533{ 2509{
2534 struct lpfc_iocbq *iocb, *next_iocb; 2510 struct lpfc_iocbq *iocbq;
2535 IOCB_t *cmd = NULL; 2511 int sum, i;
2536 struct lpfc_scsi_buf *lpfc_cmd;
2537 int sum = 0, ret_val = 0;
2538 2512
2539 /* Next check the txcmplq */ 2513 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2540 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 2514 iocbq = phba->sli.iocbq_lookup[i];
2541 cmd = &iocb->iocb;
2542
2543 /* Must be a FCP command */
2544 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2545 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2546 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2547 continue;
2548 }
2549 2515
2550 /* context1 MUST be a struct lpfc_scsi_buf */ 2516 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2551 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1); 2517 0, ctx_cmd) == 0)
2552 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id, 2518 sum++;
2553 NULL, 0, ctx_cmd);
2554 if (ret_val != 0)
2555 continue;
2556 sum++;
2557 } 2519 }
2520
2558 return sum; 2521 return sum;
2559} 2522}
2560 2523
@@ -2563,7 +2526,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2563 struct lpfc_iocbq * rspiocb) 2526 struct lpfc_iocbq * rspiocb)
2564{ 2527{
2565 spin_lock_irq(phba->host->host_lock); 2528 spin_lock_irq(phba->host->host_lock);
2566 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 2529 lpfc_sli_release_iocbq(phba, cmdiocb);
2567 spin_unlock_irq(phba->host->host_lock); 2530 spin_unlock_irq(phba->host->host_lock);
2568 return; 2531 return;
2569} 2532}
@@ -2573,39 +2536,27 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2573 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx, 2536 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2574 lpfc_ctx_cmd abort_cmd) 2537 lpfc_ctx_cmd abort_cmd)
2575{ 2538{
2576 struct lpfc_iocbq *iocb, *next_iocb; 2539 struct lpfc_iocbq *iocbq;
2577 struct lpfc_iocbq *abtsiocb = NULL; 2540 struct lpfc_iocbq *abtsiocb;
2578 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2579 IOCB_t *cmd = NULL; 2541 IOCB_t *cmd = NULL;
2580 struct lpfc_scsi_buf *lpfc_cmd;
2581 int errcnt = 0, ret_val = 0; 2542 int errcnt = 0, ret_val = 0;
2543 int i;
2582 2544
2583 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 2545 for (i = 1; i <= phba->sli.last_iotag; i++) {
2584 cmd = &iocb->iocb; 2546 iocbq = phba->sli.iocbq_lookup[i];
2585
2586 /* Must be a FCP command */
2587 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2588 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2589 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2590 continue;
2591 }
2592 2547
2593 /* context1 MUST be a struct lpfc_scsi_buf */ 2548 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2594 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1); 2549 0, abort_cmd) != 0)
2595 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2596 iocb, ctx, abort_cmd);
2597 if (ret_val != 0)
2598 continue; 2550 continue;
2599 2551
2600 /* issue ABTS for this IOCB based on iotag */ 2552 /* issue ABTS for this IOCB based on iotag */
2601 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, 2553 abtsiocb = lpfc_sli_get_iocbq(phba);
2602 list);
2603 if (abtsiocb == NULL) { 2554 if (abtsiocb == NULL) {
2604 errcnt++; 2555 errcnt++;
2605 continue; 2556 continue;
2606 } 2557 }
2607 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
2608 2558
2559 cmd = &iocbq->iocb;
2609 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 2560 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2610 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 2561 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2611 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 2562 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
@@ -2621,7 +2572,7 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2621 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 2572 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2622 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 2573 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2623 if (ret_val == IOCB_ERROR) { 2574 if (ret_val == IOCB_ERROR) {
2624 list_add_tail(&abtsiocb->list, lpfc_iocb_list); 2575 lpfc_sli_release_iocbq(phba, abtsiocb);
2625 errcnt++; 2576 errcnt++;
2626 continue; 2577 continue;
2627 } 2578 }
@@ -2630,83 +2581,99 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2630 return errcnt; 2581 return errcnt;
2631} 2582}
2632 2583
2633void 2584static void
2634lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba, 2585lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
2635 struct lpfc_iocbq * queue1, 2586 struct lpfc_iocbq *cmdiocbq,
2636 struct lpfc_iocbq * queue2) 2587 struct lpfc_iocbq *rspiocbq)
2637{ 2588{
2638 if (queue1->context2 && queue2) 2589 wait_queue_head_t *pdone_q;
2639 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq)); 2590 unsigned long iflags;
2591
2592 spin_lock_irqsave(phba->host->host_lock, iflags);
2593 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2594 if (cmdiocbq->context2 && rspiocbq)
2595 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2596 &rspiocbq->iocb, sizeof(IOCB_t));
2640 2597
2641 /* The waiter is looking for LPFC_IO_HIPRI bit to be set 2598 pdone_q = cmdiocbq->context_un.wait_queue;
2642 as a signal to wake up */ 2599 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2643 queue1->iocb_flag |= LPFC_IO_HIPRI; 2600 if (pdone_q)
2601 wake_up(pdone_q);
2644 return; 2602 return;
2645} 2603}
2646 2604
2605/*
2606 * Issue the caller's iocb and wait for its completion, but no longer than the
2607 * caller's timeout. Note that iocb_flags is cleared before the
2608 * lpfc_sli_issue_call since the wake routine sets a unique value and by
2609 * definition this is a wait function.
2610 */
2647int 2611int
2648lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba, 2612lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
2649 struct lpfc_sli_ring * pring, 2613 struct lpfc_sli_ring * pring,
2650 struct lpfc_iocbq * piocb, 2614 struct lpfc_iocbq * piocb,
2651 uint32_t flag, 2615 struct lpfc_iocbq * prspiocbq,
2652 struct lpfc_iocbq * prspiocbq, 2616 uint32_t timeout)
2653 uint32_t timeout)
2654{ 2617{
2655 int j, delay_time, retval = IOCB_ERROR; 2618 DECLARE_WAIT_QUEUE_HEAD(done_q);
2656 2619 long timeleft, timeout_req = 0;
2657 /* The caller must left context1 empty. */ 2620 int retval = IOCB_SUCCESS;
2658 if (piocb->context_un.hipri_wait_queue != 0) {
2659 return IOCB_ERROR;
2660 }
2661 2621
2662 /* 2622 /*
2663 * If the caller has provided a response iocbq buffer, context2 must 2623 * If the caller has provided a response iocbq buffer, then context2
2664 * be NULL or its an error. 2624 * is NULL or it's an error.
2665 */ 2625 */
2666 if (prspiocbq && piocb->context2) { 2626 if (prspiocbq) {
2667 return IOCB_ERROR; 2627 if (piocb->context2)
2628 return IOCB_ERROR;
2629 piocb->context2 = prspiocbq;
2668 } 2630 }
2669 2631
2670 piocb->context2 = prspiocbq; 2632 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
2633 piocb->context_un.wait_queue = &done_q;
2634 piocb->iocb_flag &= ~LPFC_IO_WAKE;
2671 2635
2672 /* Setup callback routine and issue the command. */ 2636 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
2673 piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority; 2637 if (retval == IOCB_SUCCESS) {
2674 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 2638 timeout_req = timeout * HZ;
2675 flag | SLI_IOCB_HIGH_PRIORITY); 2639 spin_unlock_irq(phba->host->host_lock);
2676 if (retval != IOCB_SUCCESS) { 2640 timeleft = wait_event_timeout(done_q,
2677 piocb->context2 = NULL; 2641 piocb->iocb_flag & LPFC_IO_WAKE,
2678 return IOCB_ERROR; 2642 timeout_req);
2679 } 2643 spin_lock_irq(phba->host->host_lock);
2680
2681 /*
2682 * This high-priority iocb was sent out-of-band. Poll for its
2683 * completion rather than wait for a signal. Note that the host_lock
2684 * is held by the midlayer and must be released here to allow the
2685 * interrupt handlers to complete the IO and signal this routine via
2686 * the iocb_flag.
2687 * Also, the delay_time is computed to be one second longer than
2688 * the scsi command timeout to give the FW time to abort on
2689 * timeout rather than the driver just giving up. Typically,
2690 * the midlayer does not specify a time for this command so the
2691 * driver is free to enforce its own timeout.
2692 */
2693 2644
2694 delay_time = ((timeout + 1) * 1000) >> 6; 2645 if (timeleft == 0) {
2695 retval = IOCB_ERROR; 2646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2696 spin_unlock_irq(phba->host->host_lock); 2647 "%d:0329 IOCB wait timeout error - no "
2697 for (j = 0; j < 64; j++) { 2648 "wake response Data x%x\n",
2698 msleep(delay_time); 2649 phba->brd_no, timeout);
2699 if (piocb->iocb_flag & LPFC_IO_HIPRI) { 2650 retval = IOCB_TIMEDOUT;
2700 piocb->iocb_flag &= ~LPFC_IO_HIPRI; 2651 } else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
2701 retval = IOCB_SUCCESS; 2652 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2702 break; 2653 "%d:0330 IOCB wake NOT set, "
2654 "Data x%x x%lx\n", phba->brd_no,
2655 timeout, (timeleft / jiffies));
2656 retval = IOCB_TIMEDOUT;
2657 } else {
2658 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2659 "%d:0331 IOCB wake signaled\n",
2660 phba->brd_no);
2703 } 2661 }
2662 } else {
2663 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2664 "%d:0332 IOCB wait issue failed, Data x%x\n",
2665 phba->brd_no, retval);
2666 retval = IOCB_ERROR;
2704 } 2667 }
2705 2668
2706 spin_lock_irq(phba->host->host_lock); 2669 if (prspiocbq)
2707 piocb->context2 = NULL; 2670 piocb->context2 = NULL;
2671
2672 piocb->context_un.wait_queue = NULL;
2673 piocb->iocb_cmpl = NULL;
2708 return retval; 2674 return retval;
2709} 2675}
2676
2710int 2677int
2711lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 2678lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
2712 uint32_t timeout) 2679 uint32_t timeout)
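The new lpfc_sli_issue_iocb_wait() shown in the last hunk turns the asynchronous IOCB completion into a synchronous call: the completion handler copies the response, sets LPFC_IO_WAKE, and wakes the submitter, which sleeps on a local wait queue with a timeout. A rough userspace analogue using a condition variable is sketched below; every identifier in it (fake_cmd, complete_cmd, issue_and_wait, ...) is invented for illustration and the sketch is not the driver code.

/*
 * Illustration only: the submitter installs a completion callback that sets
 * a flag and wakes it, then sleeps with a timeout and re-checks the flag.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_cmd {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;    /* stands in for the DECLARE_WAIT_QUEUE_HEAD */
	bool            done;       /* stands in for the LPFC_IO_WAKE flag */
};

/* Called from the "interrupt" side when the command completes. */
static void complete_cmd(struct fake_cmd *cmd)
{
	pthread_mutex_lock(&cmd->lock);
	cmd->done = true;           /* set the wake flag before waking */
	pthread_cond_signal(&cmd->done_cv);
	pthread_mutex_unlock(&cmd->lock);
}

static void *fake_irq(void *arg)
{
	struct timespec d = { .tv_sec = 1 };

	nanosleep(&d, NULL);        /* pretend the hardware took a second */
	complete_cmd(arg);
	return NULL;
}

/* Returns 0 on completion, -1 on timeout (cf. IOCB_SUCCESS / IOCB_TIMEDOUT). */
static int issue_and_wait(struct fake_cmd *cmd, unsigned int timeout_sec)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&cmd->lock);
	while (!cmd->done && rc == 0)   /* re-check the flag, not just the wake */
		rc = pthread_cond_timedwait(&cmd->done_cv, &cmd->lock, &deadline);
	rc = cmd->done ? 0 : -1;
	pthread_mutex_unlock(&cmd->lock);
	return rc;
}

int main(void)
{
	struct fake_cmd cmd = {
		.lock    = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t irq;

	pthread_create(&irq, NULL, fake_irq, &cmd);
	printf("wait returned %d\n", issue_and_wait(&cmd, 5));
	pthread_join(irq, NULL);
	return 0;
}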