author		James Smart <James.Smart@Emulex.Com>	2009-10-02 15:16:39 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-12-04 13:01:39 -0500
commit		4d9ab994e214d35107017c342aca42477b137316
tree		0ee7dd76ce9938eceeac20e4dab287194dc42c41
parent		1796e72291b2b6aafaec5954e666d0b5a95da935
[SCSI] lpfc 8.3.5: fix reset path, ELS ordering and discovery issues
This patch includes the following fixes:

- Fixed panic during HBA reset.
- Fixed FCoE event tag passed in resume_rpi.
- Fix out of order ELS commands
- Fixed discovery issues found during VLAN testing.
- Fix UNREG_VPI failure on extended link pull
- Fixed crash while processing unsolicited FC frames.
- Clear retry count in the delayed ELS handler
- Fixed discovery failure during quick link bounce.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi')
 drivers/scsi/lpfc/lpfc.h         |   3
 drivers/scsi/lpfc/lpfc_attr.c    |  11
 drivers/scsi/lpfc/lpfc_crtn.h    |   2
 drivers/scsi/lpfc/lpfc_els.c     |   1
 drivers/scsi/lpfc/lpfc_hbadisc.c |  67
 drivers/scsi/lpfc/lpfc_init.c    |  51
 drivers/scsi/lpfc/lpfc_sli.c     | 293
 drivers/scsi/lpfc/lpfc_sli.h     |  21
 drivers/scsi/lpfc/lpfc_sli4.h    |  13
 9 files changed, 242 insertions(+), 220 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aa10f7951634..c618eaf3c0c8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -109,7 +109,7 @@ struct hbq_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
-	struct lpfc_rcqe rcqe;
+	struct lpfc_cq_event cq_event;
 };
 
 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -551,6 +551,7 @@ struct lpfc_hba {
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
 
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e1a30a16a9fa..07f0172674c9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3815,7 +3815,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -3906,7 +3910,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	lso->link_events = (phba->fc_eventTag >> 1);
+	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
 
 	psli->stats_start = get_seconds();
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0830f37409a3..4438f8665a4a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,7 +235,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
 void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				     struct lpfc_sli_ring *, uint32_t);
-int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 45337cd23feb..4ea863f50650 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2452,6 +2452,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	 */
 	del_timer_sync(&ndlp->nlp_delayfunc);
 	retry = ndlp->nlp_retry;
+	ndlp->nlp_retry = 0;
 
 	switch (cmd) {
 	case ELS_CMD_FLOGI:
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e6a47e25b218..5073c127bfe1 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 		}
-		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
-			lpfc_sli4_handle_received_buffer(phba);
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
 	pring = &phba->sli.ring[LPFC_ELS_RING];
 	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
 	status >>= (4*LPFC_ELS_RING);
-	if ((status & HA_RXMASK)
-		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+	if ((status & HA_RXMASK) ||
+	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+	    (phba->hba_flag & HBA_RECEIVE_BUFFER)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 			/* Set the lpfc data pending flag */
@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 		lpfc_unreg_rpi(vport, ndlp);
 
 		/* Leave Fabric nodes alone on link down */
-		if (!remove && ndlp->nlp_type & NLP_FABRIC)
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
 			continue;
 		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
 					     remove
@@ -1015,10 +1015,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		mempool_free(mboxq, phba->mbox_mem_pool);
 		return;
 	}
+	phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 	if (vport->port_state != LPFC_FLOGI) {
 		spin_lock_irqsave(&phba->hbalock, flags);
-		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_initial_flogi(vport);
 	}
@@ -1199,6 +1199,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
@@ -1216,15 +1217,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
 		GFP_KERNEL);
-	if (!fcf_mbxq)
+	if (!fcf_mbxq) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
+	}
 
 	lpfc_reg_fcfi(phba, fcf_mbxq);
 	fcf_mbxq->vport = phba->pport;
 	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED)
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
 
 	return;
 }
@@ -1253,6 +1262,20 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 			uint16_t *vlan_id)
 {
 	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
 
 	/* If FCF not available return 0 */
 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
@@ -1286,7 +1309,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		if (*addr_mode & LPFC_FCF_FPMA)
 			*addr_mode = LPFC_FCF_FPMA;
 
-		*vlan_id = 0xFFFF;
+		/* If FCF record report a vlan id use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = 0xFFFF;
 		return 1;
 	}
 
@@ -1384,8 +1411,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		    (*addr_mode & LPFC_FCF_FPMA))
 			*addr_mode = LPFC_FCF_FPMA;
 
+		/* If matching connect list has a vlan id, use it */
 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
 			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
 		else
 			*vlan_id = 0xFFFF;
 
@@ -1423,6 +1457,12 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	else
+		/*
+		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * flag
+		 */
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
@@ -2085,6 +2125,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
+	phba->link_events++;
 	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -4409,6 +4450,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	if (lpfc_fcf_inuse(phba))
 		return;
 
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 
 	/* Unregister VPIs */
 	vports = lpfc_create_vport_work_array(phba);
@@ -4512,8 +4555,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
 	record_count = conn_hdr->length * sizeof(uint32_t)/
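
A note on the lpfc_match_fcf_conn_list() hunk above: the added scan walks the FCF record's 512-byte (4096-bit) vlan_bitmap and picks the lowest VLAN id whose bit is set, reading bit j of byte i (least-significant bit first) as VLAN id i * 8 + j. Below is a standalone userspace sketch of the same scan with a worked example; the helper name and test values are illustrative, not part of the patch.

/* Sketch of the lowest-VLAN-id scan: byte i, bit j (LSB first) of the
 * 512-byte bitmap encodes VLAN id i * 8 + j.  Not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int lowest_vlan_id(const uint8_t bitmap[512])
{
	for (int i = 0; i < 512; i++) {
		if (!bitmap[i])
			continue;
		int vlan = i * 8;
		/* advance past clear low-order bits of the first set byte */
		for (int j = 0; !((bitmap[i] >> j) & 1); j++)
			vlan++;
		return vlan;
	}
	return 0;	/* no VLAN id present; the patch falls back to 0xFFFF */
}

int main(void)
{
	uint8_t bitmap[512];

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[12] = 0x10;	/* bit 4 of byte 12 -> VLAN 12 * 8 + 4 = 100 */
	printf("%d\n", lowest_vlan_id(bitmap));	/* prints 100 */
	return 0;
}
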
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f913f1e93635..d654c0e3db4d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2919,6 +2919,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
 
+	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
@@ -2990,6 +2991,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 			 struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+	phba->fc_eventTag = acqe_dcbx->event_tag;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0290 The SLI4 DCBX asynchronous event is not "
 			"handled yet\n");
@@ -3594,8 +3596,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	return;
 }
@@ -5058,15 +5062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	}
 	phba->sli4_hba.els_cq = qdesc;
 
-	/* Create slow-path Unsolicited Receive Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0502 Failed allocate slow-path USOL RX CQ\n");
-		goto out_free_els_cq;
-	}
-	phba->sli4_hba.rxq_cq = qdesc;
 
 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5070,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2577 Failed allocate memory for fast-path "
 				"CQ record array\n");
-		goto out_free_rxq_cq;
+		goto out_free_els_cq;
 	}
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5183,6 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5247,10 +5239,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
 	phba->sli4_hba.dat_rq = NULL;
 
-	/* Release unsolicited receive complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
-
 	/* Release ELS complete queue */
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5383,25 +5371,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id,
 			phba->sli4_hba.sp_eq->queue_id);
 
-	/* Set up slow-path Unsolicited Receive Complete Queue */
-	if (!phba->sli4_hba.rxq_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0532 USOL RX CQ not allocated\n");
-		goto out_destroy_els_cq;
-	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
-			    LPFC_RCQ, LPFC_USOL);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0533 Failed setup of slow-path USOL RX CQ: "
-				"rc = 0x%x\n", rc);
-		goto out_destroy_els_cq;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
-			phba->sli4_hba.rxq_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
-
 	/* Set up fast-path FCP Response Complete Queue */
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +5476,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		goto out_destroy_fcp_wq;
 	}
 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
-			    phba->sli4_hba.rxq_cq, LPFC_USOL);
+			    phba->sli4_hba.els_cq, LPFC_USOL);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0541 Failed setup of Receive Queue: "
@@ -5519,7 +5488,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			"parent cq-id=%d\n",
 			phba->sli4_hba.hdr_rq->queue_id,
 			phba->sli4_hba.dat_rq->queue_id,
-			phba->sli4_hba.rxq_cq->queue_id);
+			phba->sli4_hba.els_cq->queue_id);
 	return 0;
 
 out_destroy_fcp_wq:
@@ -5531,8 +5500,6 @@ out_destroy_mbx_wq:
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5574,8 +5541,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-	/* Unset unsolicited receive complete queue */
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
 	/* Unset FCP response complete queue */
 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..8d884d8e18be 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3018,16 +3018,31 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 			      struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
 	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
-				 irspiocbq, struct lpfc_iocbq, list);
+				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		/* Process the response iocb */
-		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -3416,6 +3431,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3476,6 +3492,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3495,7 +3512,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	list_del_init(&phba->sli4_hba.dat_rq->list);
 	list_del_init(&phba->sli4_hba.mbx_cq->list);
 	list_del_init(&phba->sli4_hba.els_cq->list);
-	list_del_init(&phba->sli4_hba.rxq_cq->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
 		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -4243,7 +4259,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
 		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
 				     LPFC_QUEUE_REARM);
@@ -8351,8 +8366,7 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
-	memset(&pIocbIn->sli4_info, 0,
-	       sizeof(struct lpfc_sli4_rspiocb_info));
+	pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe;
 	/* Map WCQE parameters into irspiocb parameters */
 	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8364,16 +8378,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
 	else
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-	/* Load in additional WCQE parameters */
-	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
-	pIocbIn->sli4_info.bfield = 0;
-	if (bf_get(lpfc_wcqe_c_xb, wcqe))
-		pIocbIn->sli4_info.bfield |= LPFC_XB;
-	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
-		pIocbIn->sli4_info.bfield |= LPFC_PV;
-		pIocbIn->sli4_info.priority =
-			bf_get(lpfc_wcqe_c_priority, wcqe);
-	}
 }
 
 /**
@@ -8598,7 +8602,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 
 	/* Add the irspiocb to the response IOCB work list */
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_rspiocb_work_queue);
 	/* Indicate ELS ring attention */
 	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -8690,52 +8695,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
- *
- * This routine process a slow-path work-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
-			 struct lpfc_cqe *cqe)
-{
-	struct lpfc_wcqe_complete wcqe;
-	bool workposted = false;
-
-	/* Copy the work queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
-
-	/* Check and process for different type of WCQE and dispatch */
-	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
-	case CQE_CODE_COMPL_WQE:
-		/* Process the WQ complete event */
-		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
-				(struct lpfc_wcqe_complete *)&wcqe);
-		break;
-	case CQE_CODE_RELEASE_WQE:
-		/* Process the WQ release event */
-		lpfc_sli4_sp_handle_rel_wcqe(phba,
-				(struct lpfc_wcqe_release *)&wcqe);
-		break;
-	case CQE_CODE_XRI_ABORTED:
-		/* Process the WQ XRI abort event */
-		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
-				(struct sli4_wcqe_xri_aborted *)&wcqe);
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0388 Not a valid WCQE code: x%x\n",
-				bf_get(lpfc_wcqe_c_code, &wcqe));
-		break;
-	}
-	return workposted;
-}
-
-/**
  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
  * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8704,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
-	struct lpfc_rcqe rcqe;
 	bool workposted = false;
 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,15 +8713,13 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 	uint32_t status;
 	unsigned long iflags;
 
-	/* Copy the receive queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
 	lpfc_sli4_rq_release(hrq, drq);
-	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+	if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE)
 		goto out;
-	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
 		goto out;
 
-	status = bf_get(lpfc_rcqe_status, &rcqe);
+	status = bf_get(lpfc_rcqe_status, rcqe);
 	switch (status) {
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -8775,9 +8731,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
-		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 		/* save off the frame for the word thread to process */
-		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_rspiocb_work_queue);
 		/* Frame received */
 		phba->hba_flag |= HBA_RECEIVE_BUFFER;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -8798,6 +8755,58 @@ out:
 }
 
 /**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine process a slow-path work-queue or recieve queue completion queue
+ * entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_complete wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	case CQE_CODE_RECEIVE:
+		/* Process the RQ event */
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
@@ -8858,14 +8867,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 		break;
 	case LPFC_WCQ:
 		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
-			if (!(++ecount % LPFC_GET_QE_REL_INT))
-				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
-		}
-		break;
-	case LPFC_RCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
 			if (!(++ecount % LPFC_GET_QE_REL_INT))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
@@ -10823,6 +10825,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct hbq_dmabuf *seq_dmabuf = NULL;
 	struct hbq_dmabuf *temp_dmabuf = NULL;
 
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10845,7 +10848,9 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	}
 	temp_hdr = seq_dmabuf->hbuf.virt;
 	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
-		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
 		return dmabuf;
 	}
 	/* find the correct place in the sequence to insert this frame */
@@ -10957,7 +10962,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			LPFC_DATA_BUF_SIZE;
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-			bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+			bf_get(lpfc_rcqe_length,
+			       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 	}
 	iocbq = first_iocbq;
 	/*
@@ -10975,7 +10981,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
 				LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 		} else {
 			iocbq = lpfc_sli_get_iocbq(vport->phba);
 			if (!iocbq) {
@@ -10994,7 +11001,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 				LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 			iocbq->iocb.un.rcvels.remoteID = sid;
 			list_add_tail(&iocbq->list, &first_iocbq->list);
 		}
@@ -11014,11 +11022,11 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
 {
-	LIST_HEAD(cmplq);
-	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+	struct hbq_dmabuf *seq_dmabuf;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
@@ -11027,54 +11035,50 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
 	/* Clear hba flag and get all received buffers into the cmplq */
 	spin_lock_irq(&phba->hbalock);
 	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
-	list_splice_init(&phba->rb_pend_list, &cmplq);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Process each received buffer */
-	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
-		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
-		/* check to see if this a valid type of frame */
-		if (lpfc_fc_frame_check(phba, fc_hdr)) {
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
-		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
-		if (!vport) {
-			/* throw out the frame */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* Link this frame */
-		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
-		if (!seq_dmabuf) {
-			/* unable to add frame to vport - throw it out */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* If not last frame in sequence continue processing frames. */
-		if (!lpfc_seq_complete(seq_dmabuf)) {
-			/*
-			 * When saving off frames post a new one and mark this
-			 * frame to be freed when it is finished.
-			 **/
-			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
-			dmabuf->tag = -1;
-			continue;
-		}
-		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
-		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
-		if (!lpfc_complete_unsol_iocb(phba,
-					      &phba->sli.ring[LPFC_ELS_RING],
-					      iocbq, fc_hdr->fh_r_ctl,
-					      fc_hdr->fh_type))
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"2540 Ring %d handler: unexpected Rctl "
-					"x%x Type x%x received\n",
-					LPFC_ELS_RING,
-					fc_hdr->fh_r_ctl, fc_hdr->fh_type);
-	};
-	return 0;
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* check to see if this a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+	if (!vport) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf)) {
+		/*
+		 * When saving off frames post a new one and mark this
+		 * frame to be freed when it is finished.
+		 **/
+		lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+		dmabuf->tag = -1;
+		return;
+	}
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!lpfc_complete_unsol_iocb(phba,
+				      &phba->sli.ring[LPFC_ELS_RING],
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2540 Ring %d handler: unexpected Rctl "
+				"x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
 }
 
 /**
@@ -11542,7 +11546,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2000 Failed to allocate mbox for "
 				"READ_FCF cmd\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	req_len = sizeof(struct fcf_record) +
@@ -11558,8 +11563,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 			"0291 Allocated DMA memory size (x%x) is "
 			"less than the requested DMA memory "
 			"size (x%x)\n", alloc_len, req_len);
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
@@ -11571,8 +11576,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2527 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
@@ -11586,7 +11591,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		error = -EIO;
 	} else {
 		spin_lock_irq(&phba->hbalock);
@@ -11594,6 +11598,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		spin_unlock_irq(&phba->hbalock);
 		error = 0;
 	}
+fail_fcfscan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	return error;
 }
 
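A note on the lpfc_sli.c changes as a whole: the ELS ordering fix works by embedding a struct lpfc_cq_event in both lpfc_iocbq and hbq_dmabuf, queuing every slow-path completion and received frame on the single sp_rspiocb_work_queue list in arrival order, and letting the worker thread recover the outer object with container_of() after switching on the CQE code. Because the list node lives inside whichever object produced the event, no extra allocation or lookup is needed to route it. Below is a self-contained userspace sketch of that pattern; the simplified types and the plain next pointer standing in for list_head are illustrative, not driver code.

/* Sketch: two container types embed the same event node; one queue
 * preserves arrival order; the consumer recovers the outer object
 * with container_of() keyed off the event code.  Not driver code. */
#include <stddef.h>
#include <stdio.h>

struct cq_event {
	struct cq_event *next;	/* stand-in for struct list_head */
	int code;		/* stand-in for the CQE code field */
};
#define CODE_COMPL_WQE	1
#define CODE_RECEIVE	2

struct iocbq {			/* plays the role of lpfc_iocbq */
	int iotag;
	struct cq_event cq_event;
};
struct dmabuf {			/* plays the role of hbq_dmabuf */
	int tag;
	struct cq_event cq_event;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void handle(struct cq_event *ev)
{
	switch (ev->code) {
	case CODE_COMPL_WQE:	/* an ELS completion: recover the iocb */
		printf("iocb %d\n",
		       container_of(ev, struct iocbq, cq_event)->iotag);
		break;
	case CODE_RECEIVE:	/* an unsolicited frame: recover the buffer */
		printf("buffer %d\n",
		       container_of(ev, struct dmabuf, cq_event)->tag);
		break;
	}
}

int main(void)
{
	struct iocbq io = { .iotag = 7, .cq_event = { NULL, CODE_COMPL_WQE } };
	struct dmabuf buf = { .tag = 3, .cq_event = { NULL, CODE_RECEIVE } };

	/* one queue, arrival order preserved across both event types */
	io.cq_event.next = &buf.cq_event;
	for (struct cq_event *ev = &io.cq_event; ev; ev = ev->next)
		handle(ev);
	return 0;
}
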
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3c53316cf6d0..ad8094966ff3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
 	LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
-/* This structure is used to carry the needed response IOCB states */
-struct lpfc_sli4_rspiocb_info {
-	uint8_t hw_status;
-	uint8_t bfield;
-#define LPFC_XB 0x1
-#define LPFC_PV 0x2
-	uint8_t priority;
-	uint8_t reserved;
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+		struct lpfc_wcqe_complete	wcqe_cmpl;
+	} cqe;
 };
 
 /* This structure is used to handle IOCB requests / responses */
@@ -76,7 +79,7 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-	struct lpfc_sli4_rspiocb_info sli4_info;
+	struct lpfc_cq_event cq_event;
 };
 
 #define SLI_IOCB_RET_IOCB	1	/* Return IOCB if cmd ring full */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index b5f4ba1a5c27..97da7589e038 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -110,18 +110,6 @@ struct lpfc_queue {
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
 };
 
-struct lpfc_cq_event {
-	struct list_head list;
-	union {
-		struct lpfc_mcqe		mcqe_cmpl;
-		struct lpfc_acqe_link		acqe_link;
-		struct lpfc_acqe_fcoe		acqe_fcoe;
-		struct lpfc_acqe_dcbx		acqe_dcbx;
-		struct lpfc_rcqe		rcqe_cmpl;
-		struct sli4_wcqe_xri_aborted	wcqe_axri;
-	} cqe;
-};
-
 struct lpfc_sli4_link {
 	uint8_t speed;
 	uint8_t duplex;
@@ -325,7 +313,6 @@ struct lpfc_sli4_hba {
 	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
 	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
 	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
-	struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
 
 	/* Setup information for various queue parameters */
 	int eq_esize;