author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:43:43 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:43:43 -0500
commit    21f3b24da9328415792efc780f50b9f434c12465 (patch)
tree      446ad6d2154e0f05bcb079cb99a144102c682eb9 /drivers/scsi/lpfc
parent    2a7d2b96d5cba7568139d9ab157a0e97ab32440f (diff)
parent    2b4df6ea53d05625e9ca2dd73bc0e831976e009d (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "The patch set is mostly driver updates (bnx2fc, ipr, lpfc, qla4) and
  a few bug fixes"

Pull delayed because google hates James, and sneakily considers his
pull requests spam.  Why, google, why?

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (60 commits)
  [SCSI] aacraid: 1024 max outstanding command support for Series 7 and above
  [SCSI] bnx2fc: adjust duplicate test
  [SCSI] qla4xxx: Update driver version to 5.03.00-k4
  [SCSI] qla4xxx: Fix return code for qla4xxx_session_get_param.
  [SCSI] qla4xxx: wait for boot target login response during probe.
  [SCSI] qla4xxx: Added support for force firmware dump
  [SCSI] qla4xxx: Re-register IRQ handler while retrying initialize of adapter
  [SCSI] qla4xxx: Throttle active IOCBs to firmware limits
  [SCSI] qla4xxx: Remove unnecessary code from qla4xxx_init_local_data
  [SCSI] qla4xxx: Quiesce driver activities while loopback
  [SCSI] qla4xxx: Rename MBOX_ASTS_IDC_NOTIFY to MBOX_ASTS_IDC_REQUEST_NOTIFICATION
  [SCSI] qla4xxx: Add spurious interrupt messages under debug level 2
  [SCSI] cxgb4i: Remove the scsi host device when removing device
  [SCSI] bfa: fix strncpy() limiter in bfad_start_ops()
  [SCSI] qla4xxx: Update driver version to 5.03.00-k3
  [SCSI] qla4xxx: Correct the validation to check in get_sys_info mailbox
  [SCSI] qla4xxx: Pass correct function param to qla4_8xxx_rd_direct
  [SCSI] lpfc 8.3.37: Update lpfc version for 8.3.37 driver release
  [SCSI] lpfc 8.3.37: Fixed infinite loop in lpfc_sli4_fcf_rr_next_index_get.
  [SCSI] lpfc 8.3.37: Fixed crash due to SLI Port invalid resource count
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h             17
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c         51
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h         4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c          37
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         25
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h           2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        176
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       251
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c   10
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c        24
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        378
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h         8
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h      2
13 files changed, 657 insertions, 328 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df4c13a5534c..7706c99ec8bb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -466,11 +466,13 @@ enum intr_type_t {
466 MSIX, 466 MSIX,
467}; 467};
468 468
469#define LPFC_CT_CTX_MAX 64
469struct unsol_rcv_ct_ctx { 470struct unsol_rcv_ct_ctx {
470 uint32_t ctxt_id; 471 uint32_t ctxt_id;
471 uint32_t SID; 472 uint32_t SID;
472 uint32_t flags; 473 uint32_t valid;
473#define UNSOL_VALID 0x00000001 474#define UNSOL_INVALID 0
475#define UNSOL_VALID 1
474 uint16_t oxid; 476 uint16_t oxid;
475 uint16_t rxid; 477 uint16_t rxid;
476}; 478};
@@ -750,6 +752,15 @@ struct lpfc_hba {
750 void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for 752 void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
751 PCI BAR2 */ 753 PCI BAR2 */
752 754
755 void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
756 PCI BAR0 with dual-ULP support */
757 void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
758 PCI BAR2 with dual-ULP support */
759 void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
760 PCI BAR4 with dual-ULP support */
761#define PCI_64BIT_BAR0 0
762#define PCI_64BIT_BAR2 2
763#define PCI_64BIT_BAR4 4
753 void __iomem *MBslimaddr; /* virtual address for mbox cmds */ 764 void __iomem *MBslimaddr; /* virtual address for mbox cmds */
754 void __iomem *HAregaddr; /* virtual address for host attn reg */ 765 void __iomem *HAregaddr; /* virtual address for host attn reg */
755 void __iomem *CAregaddr; /* virtual address for chip attn reg */ 766 void __iomem *CAregaddr; /* virtual address for chip attn reg */
@@ -938,7 +949,7 @@ struct lpfc_hba {
938 949
939 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ 950 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
940 struct list_head ct_ev_waiters; 951 struct list_head ct_ev_waiters;
941 struct unsol_rcv_ct_ctx ct_ctx[64]; 952 struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
942 uint32_t ctx_idx; 953 uint32_t ctx_idx;
943 954
944 uint8_t menlo_flag; /* menlo generic flags */ 955 uint8_t menlo_flag; /* menlo generic flags */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f7368eb80415..32d5683e6181 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
955 spin_lock_irqsave(&phba->ct_ev_lock, flags); 955 spin_lock_irqsave(&phba->ct_ev_lock, flags);
956 if (phba->sli_rev == LPFC_SLI_REV4) { 956 if (phba->sli_rev == LPFC_SLI_REV4) {
957 evt_dat->immed_dat = phba->ctx_idx; 957 evt_dat->immed_dat = phba->ctx_idx;
958 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 958 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
959 /* Provide warning for over-run of the ct_ctx array */ 959 /* Provide warning for over-run of the ct_ctx array */
960 if (phba->ct_ctx[evt_dat->immed_dat].flags & 960 if (phba->ct_ctx[evt_dat->immed_dat].valid ==
961 UNSOL_VALID) 961 UNSOL_VALID)
962 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 962 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
963 "2717 CT context array entry " 963 "2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
973 piocbq->iocb.unsli3.rcvsli3.ox_id; 973 piocbq->iocb.unsli3.rcvsli3.ox_id;
974 phba->ct_ctx[evt_dat->immed_dat].SID = 974 phba->ct_ctx[evt_dat->immed_dat].SID =
975 piocbq->iocb.un.rcvels.remoteID; 975 piocbq->iocb.un.rcvels.remoteID;
976 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; 976 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
977 } else 977 } else
978 evt_dat->immed_dat = piocbq->iocb.ulpContext; 978 evt_dat->immed_dat = piocbq->iocb.ulpContext;
979 979
@@ -1013,6 +1013,47 @@ error_ct_unsol_exit:
1013} 1013}
1014 1014
1015/** 1015/**
1016 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1017 * @phba: Pointer to HBA context object.
1018 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1019 *
1020 * This function handles abort to the CT command toward management plane
1021 * for SLI4 port.
1022 *
1023 * If the pending context of a CT command to management plane present, clears
1024 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1025 * no context exists.
1026 **/
1027int
1028lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1029{
1030 struct fc_frame_header fc_hdr;
1031 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1032 int ctx_idx, handled = 0;
1033 uint16_t oxid, rxid;
1034 uint32_t sid;
1035
1036 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1037 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1038 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1039 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1040
1041 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1042 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1043 continue;
1044 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1045 continue;
1046 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1047 continue;
1048 if (phba->ct_ctx[ctx_idx].SID != sid)
1049 continue;
1050 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1051 handled = 1;
1052 }
1053 return handled;
1054}
1055
1056/**
1016 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command 1057 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1017 * @job: SET_EVENT fc_bsg_job 1058 * @job: SET_EVENT fc_bsg_job
1018 **/ 1059 **/
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1318 icmd->ulpClass = CLASS3; 1359 icmd->ulpClass = CLASS3;
1319 if (phba->sli_rev == LPFC_SLI_REV4) { 1360 if (phba->sli_rev == LPFC_SLI_REV4) {
1320 /* Do not issue unsol response if oxid not marked as valid */ 1361 /* Do not issue unsol response if oxid not marked as valid */
1321 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) { 1362 if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1322 rc = IOCB_ERROR; 1363 rc = IOCB_ERROR;
1323 goto issue_ct_rsp_exit; 1364 goto issue_ct_rsp_exit;
1324 } 1365 }
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1352 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 1393 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1353 1394
1354 /* The exchange is done, mark the entry as invalid */ 1395 /* The exchange is done, mark the entry as invalid */
1355 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1396 phba->ct_ctx[tag].valid = UNSOL_INVALID;
1356 } else 1397 } else
1357 icmd->ulpContext = (ushort) tag; 1398 icmd->ulpContext = (ushort) tag;
1358 1399
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 69d66e3662cb..76ca65dae781 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
164 164
165void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 165void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
166 struct lpfc_iocbq *); 166 struct lpfc_iocbq *);
167void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 167int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
168 struct lpfc_iocbq *);
169int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 168int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
170int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 169int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
171void lpfc_fdmi_tmo(unsigned long); 170void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
427int lpfc_bsg_timeout(struct fc_bsg_job *); 426int lpfc_bsg_timeout(struct fc_bsg_job *);
428int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 427int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
429 struct lpfc_iocbq *); 428 struct lpfc_iocbq *);
429int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
430void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *, 430void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
431 struct lpfc_iocbq *); 431 struct lpfc_iocbq *);
432struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *, 432struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 65f9fb6862e6..7bff3a19af56 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
164} 164}
165 165
166/** 166/**
167 * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort 167 * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
168 * @phba: Pointer to HBA context object. 168 * @phba: Pointer to HBA context object.
169 * @pring: Pointer to the driver internal I/O ring. 169 * @dmabuf: pointer to a dmabuf that describes the FC sequence
170 * @piocbq: Pointer to the IOCBQ.
171 * 170 *
172 * This function serves as the default handler for the sli4 unsolicited 171 * This function serves as the upper level protocol abort handler for CT
173 * abort event. It shall be invoked when there is no application interface 172 * protocol.
174 * registered unsolicited abort handler. This handler does nothing but 173 *
175 * just simply releases the dma buffer used by the unsol abort event. 174 * Return 1 if abort has been handled, 0 otherwise.
176 **/ 175 **/
177void 176int
178lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba, 177lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
179 struct lpfc_sli_ring *pring,
180 struct lpfc_iocbq *piocbq)
181{ 178{
182 IOCB_t *icmd = &piocbq->iocb; 179 int handled;
183 struct lpfc_dmabuf *bdeBuf;
184 uint32_t size;
185 180
186 /* Forward abort event to any process registered to receive ct event */ 181 /* CT upper level goes through BSG */
187 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0) 182 handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
188 return;
189 183
190 /* If there is no BDE associated with IOCB, there is nothing to do */ 184 return handled;
191 if (icmd->ulpBdeCount == 0)
192 return;
193 bdeBuf = piocbq->context2;
194 piocbq->context2 = NULL;
195 size = icmd->un.cont64[0].tus.f.bdeSize;
196 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
197 lpfc_in_buf_free(phba, bdeBuf);
198} 185}
199 186
200static void 187static void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b9440deaad45..08d156a9094f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3122 3122
3123 case IOERR_SEQUENCE_TIMEOUT: 3123 case IOERR_SEQUENCE_TIMEOUT:
3124 case IOERR_INVALID_RPI: 3124 case IOERR_INVALID_RPI:
3125 if (cmd == ELS_CMD_PLOGI &&
3126 did == NameServer_DID) {
3127 /* Continue forever if plogi to */
3128 /* the nameserver fails */
3129 maxretry = 0;
3130 delay = 100;
3131 }
3125 retry = 1; 3132 retry = 1;
3126 break; 3133 break;
3127 } 3134 }
@@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6517 struct lpfc_nodelist *ndlp; 6524 struct lpfc_nodelist *ndlp;
6518 struct ls_rjt stat; 6525 struct ls_rjt stat;
6519 uint32_t *payload; 6526 uint32_t *payload;
6520 uint32_t cmd, did, newnode, rjt_err = 0; 6527 uint32_t cmd, did, newnode;
6528 uint8_t rjt_exp, rjt_err = 0;
6521 IOCB_t *icmd = &elsiocb->iocb; 6529 IOCB_t *icmd = &elsiocb->iocb;
6522 6530
6523 if (!vport || !(elsiocb->context2)) 6531 if (!vport || !(elsiocb->context2))
@@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6606 /* If Nport discovery is delayed, reject PLOGIs */ 6614 /* If Nport discovery is delayed, reject PLOGIs */
6607 if (vport->fc_flag & FC_DISC_DELAYED) { 6615 if (vport->fc_flag & FC_DISC_DELAYED) {
6608 rjt_err = LSRJT_UNABLE_TPC; 6616 rjt_err = LSRJT_UNABLE_TPC;
6617 rjt_exp = LSEXP_NOTHING_MORE;
6609 break; 6618 break;
6610 } 6619 }
6611 if (vport->port_state < LPFC_DISC_AUTH) { 6620 if (vport->port_state < LPFC_DISC_AUTH) {
6612 if (!(phba->pport->fc_flag & FC_PT2PT) || 6621 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6613 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6622 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6614 rjt_err = LSRJT_UNABLE_TPC; 6623 rjt_err = LSRJT_UNABLE_TPC;
6624 rjt_exp = LSEXP_NOTHING_MORE;
6615 break; 6625 break;
6616 } 6626 }
6617 /* We get here, and drop thru, if we are PT2PT with 6627 /* We get here, and drop thru, if we are PT2PT with
@@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6648 lpfc_send_els_event(vport, ndlp, payload); 6658 lpfc_send_els_event(vport, ndlp, payload);
6649 if (vport->port_state < LPFC_DISC_AUTH) { 6659 if (vport->port_state < LPFC_DISC_AUTH) {
6650 rjt_err = LSRJT_UNABLE_TPC; 6660 rjt_err = LSRJT_UNABLE_TPC;
6661 rjt_exp = LSEXP_NOTHING_MORE;
6651 break; 6662 break;
6652 } 6663 }
6653 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 6664 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6661 lpfc_send_els_event(vport, ndlp, payload); 6672 lpfc_send_els_event(vport, ndlp, payload);
6662 if (vport->port_state < LPFC_DISC_AUTH) { 6673 if (vport->port_state < LPFC_DISC_AUTH) {
6663 rjt_err = LSRJT_UNABLE_TPC; 6674 rjt_err = LSRJT_UNABLE_TPC;
6675 rjt_exp = LSEXP_NOTHING_MORE;
6664 break; 6676 break;
6665 } 6677 }
6666 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 6678 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6680 phba->fc_stat.elsRcvADISC++; 6692 phba->fc_stat.elsRcvADISC++;
6681 if (vport->port_state < LPFC_DISC_AUTH) { 6693 if (vport->port_state < LPFC_DISC_AUTH) {
6682 rjt_err = LSRJT_UNABLE_TPC; 6694 rjt_err = LSRJT_UNABLE_TPC;
6695 rjt_exp = LSEXP_NOTHING_MORE;
6683 break; 6696 break;
6684 } 6697 }
6685 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6698 lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6693 phba->fc_stat.elsRcvPDISC++; 6706 phba->fc_stat.elsRcvPDISC++;
6694 if (vport->port_state < LPFC_DISC_AUTH) { 6707 if (vport->port_state < LPFC_DISC_AUTH) {
6695 rjt_err = LSRJT_UNABLE_TPC; 6708 rjt_err = LSRJT_UNABLE_TPC;
6709 rjt_exp = LSEXP_NOTHING_MORE;
6696 break; 6710 break;
6697 } 6711 }
6698 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6712 lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6730 phba->fc_stat.elsRcvPRLI++; 6744 phba->fc_stat.elsRcvPRLI++;
6731 if (vport->port_state < LPFC_DISC_AUTH) { 6745 if (vport->port_state < LPFC_DISC_AUTH) {
6732 rjt_err = LSRJT_UNABLE_TPC; 6746 rjt_err = LSRJT_UNABLE_TPC;
6747 rjt_exp = LSEXP_NOTHING_MORE;
6733 break; 6748 break;
6734 } 6749 }
6735 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 6750 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6813 if (newnode) 6828 if (newnode)
6814 lpfc_nlp_put(ndlp); 6829 lpfc_nlp_put(ndlp);
6815 break; 6830 break;
6831 case ELS_CMD_REC:
6832 /* receive this due to exchange closed */
6833 rjt_err = LSRJT_UNABLE_TPC;
6834 rjt_exp = LSEXP_INVALID_OX_RX;
6835 break;
6816 default: 6836 default:
6817 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6837 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6818 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6838 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6820 6840
6821 /* Unsupported ELS command, reject */ 6841 /* Unsupported ELS command, reject */
6822 rjt_err = LSRJT_CMD_UNSUPPORTED; 6842 rjt_err = LSRJT_CMD_UNSUPPORTED;
6843 rjt_exp = LSEXP_NOTHING_MORE;
6823 6844
6824 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6845 /* Unknown ELS command <elsCmd> received from NPORT <did> */
6825 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6846 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6834 if (rjt_err) { 6855 if (rjt_err) {
6835 memset(&stat, 0, sizeof(stat)); 6856 memset(&stat, 0, sizeof(stat));
6836 stat.un.b.lsRjtRsnCode = rjt_err; 6857 stat.un.b.lsRjtRsnCode = rjt_err;
6837 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 6858 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
6838 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 6859 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
6839 NULL); 6860 NULL);
6840 } 6861 }
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7398ca862e97..e8c476031703 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -538,6 +538,7 @@ struct fc_vft_header {
538#define ELS_CMD_ECHO 0x10000000 538#define ELS_CMD_ECHO 0x10000000
539#define ELS_CMD_TEST 0x11000000 539#define ELS_CMD_TEST 0x11000000
540#define ELS_CMD_RRQ 0x12000000 540#define ELS_CMD_RRQ 0x12000000
541#define ELS_CMD_REC 0x13000000
541#define ELS_CMD_PRLI 0x20100014 542#define ELS_CMD_PRLI 0x20100014
542#define ELS_CMD_PRLO 0x21100014 543#define ELS_CMD_PRLO 0x21100014
543#define ELS_CMD_PRLO_ACC 0x02100014 544#define ELS_CMD_PRLO_ACC 0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
574#define ELS_CMD_ECHO 0x10 575#define ELS_CMD_ECHO 0x10
575#define ELS_CMD_TEST 0x11 576#define ELS_CMD_TEST 0x11
576#define ELS_CMD_RRQ 0x12 577#define ELS_CMD_RRQ 0x12
578#define ELS_CMD_REC 0x13
577#define ELS_CMD_PRLI 0x14001020 579#define ELS_CMD_PRLI 0x14001020
578#define ELS_CMD_PRLO 0x14001021 580#define ELS_CMD_PRLO 0x14001021
579#define ELS_CMD_PRLO_ACC 0x14001002 581#define ELS_CMD_PRLO_ACC 0x14001002
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index a47cfbdd05f2..6e93b886cd4d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
106 106
107#define LPFC_SLI4_MB_WORD_COUNT 64 107#define LPFC_SLI4_MB_WORD_COUNT 64
108#define LPFC_MAX_MQ_PAGE 8 108#define LPFC_MAX_MQ_PAGE 8
109#define LPFC_MAX_WQ_PAGE_V0 4
109#define LPFC_MAX_WQ_PAGE 8 110#define LPFC_MAX_WQ_PAGE 8
110#define LPFC_MAX_CQ_PAGE 4 111#define LPFC_MAX_CQ_PAGE 4
111#define LPFC_MAX_EQ_PAGE 8 112#define LPFC_MAX_EQ_PAGE 8
@@ -703,24 +704,41 @@ struct lpfc_register {
703 * BAR0. The offsets are the same so the driver must account for 704 * BAR0. The offsets are the same so the driver must account for
704 * any base address difference. 705 * any base address difference.
705 */ 706 */
706#define LPFC_RQ_DOORBELL 0x00A0 707#define LPFC_ULP0_RQ_DOORBELL 0x00A0
707#define lpfc_rq_doorbell_num_posted_SHIFT 16 708#define LPFC_ULP1_RQ_DOORBELL 0x00C0
708#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF 709#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
709#define lpfc_rq_doorbell_num_posted_WORD word0 710#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
710#define lpfc_rq_doorbell_id_SHIFT 0 711#define lpfc_rq_db_list_fm_num_posted_WORD word0
711#define lpfc_rq_doorbell_id_MASK 0xFFFF 712#define lpfc_rq_db_list_fm_index_SHIFT 16
712#define lpfc_rq_doorbell_id_WORD word0 713#define lpfc_rq_db_list_fm_index_MASK 0x00FF
713 714#define lpfc_rq_db_list_fm_index_WORD word0
714#define LPFC_WQ_DOORBELL 0x0040 715#define lpfc_rq_db_list_fm_id_SHIFT 0
715#define lpfc_wq_doorbell_num_posted_SHIFT 24 716#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
716#define lpfc_wq_doorbell_num_posted_MASK 0x00FF 717#define lpfc_rq_db_list_fm_id_WORD word0
717#define lpfc_wq_doorbell_num_posted_WORD word0 718#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
718#define lpfc_wq_doorbell_index_SHIFT 16 719#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
719#define lpfc_wq_doorbell_index_MASK 0x00FF 720#define lpfc_rq_db_ring_fm_num_posted_WORD word0
720#define lpfc_wq_doorbell_index_WORD word0 721#define lpfc_rq_db_ring_fm_id_SHIFT 0
721#define lpfc_wq_doorbell_id_SHIFT 0 722#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
722#define lpfc_wq_doorbell_id_MASK 0xFFFF 723#define lpfc_rq_db_ring_fm_id_WORD word0
723#define lpfc_wq_doorbell_id_WORD word0 724
725#define LPFC_ULP0_WQ_DOORBELL 0x0040
726#define LPFC_ULP1_WQ_DOORBELL 0x0060
727#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
728#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
729#define lpfc_wq_db_list_fm_num_posted_WORD word0
730#define lpfc_wq_db_list_fm_index_SHIFT 16
731#define lpfc_wq_db_list_fm_index_MASK 0x00FF
732#define lpfc_wq_db_list_fm_index_WORD word0
733#define lpfc_wq_db_list_fm_id_SHIFT 0
734#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
735#define lpfc_wq_db_list_fm_id_WORD word0
736#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
737#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
738#define lpfc_wq_db_ring_fm_num_posted_WORD word0
739#define lpfc_wq_db_ring_fm_id_SHIFT 0
740#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
741#define lpfc_wq_db_ring_fm_id_WORD word0
724 742
725#define LPFC_EQCQ_DOORBELL 0x0120 743#define LPFC_EQCQ_DOORBELL 0x0120
726#define lpfc_eqcq_doorbell_se_SHIFT 31 744#define lpfc_eqcq_doorbell_se_SHIFT 31
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
1131 struct { /* Version 0 Request */ 1149 struct { /* Version 0 Request */
1132 uint32_t word0; 1150 uint32_t word0;
1133#define lpfc_mbx_wq_create_num_pages_SHIFT 0 1151#define lpfc_mbx_wq_create_num_pages_SHIFT 0
1134#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF 1152#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
1135#define lpfc_mbx_wq_create_num_pages_WORD word0 1153#define lpfc_mbx_wq_create_num_pages_WORD word0
1154#define lpfc_mbx_wq_create_dua_SHIFT 8
1155#define lpfc_mbx_wq_create_dua_MASK 0x00000001
1156#define lpfc_mbx_wq_create_dua_WORD word0
1136#define lpfc_mbx_wq_create_cq_id_SHIFT 16 1157#define lpfc_mbx_wq_create_cq_id_SHIFT 16
1137#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF 1158#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
1138#define lpfc_mbx_wq_create_cq_id_WORD word0 1159#define lpfc_mbx_wq_create_cq_id_WORD word0
1139 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1160 struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
1161 uint32_t word9;
1162#define lpfc_mbx_wq_create_bua_SHIFT 0
1163#define lpfc_mbx_wq_create_bua_MASK 0x00000001
1164#define lpfc_mbx_wq_create_bua_WORD word9
1165#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
1166#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
1167#define lpfc_mbx_wq_create_ulp_num_WORD word9
1140 } request; 1168 } request;
1141 struct { /* Version 1 Request */ 1169 struct { /* Version 1 Request */
1142 uint32_t word0; /* Word 0 is the same as in v0 */ 1170 uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
1160#define lpfc_mbx_wq_create_q_id_SHIFT 0 1188#define lpfc_mbx_wq_create_q_id_SHIFT 0
1161#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF 1189#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
1162#define lpfc_mbx_wq_create_q_id_WORD word0 1190#define lpfc_mbx_wq_create_q_id_WORD word0
1191 uint32_t doorbell_offset;
1192 uint32_t word2;
1193#define lpfc_mbx_wq_create_bar_set_SHIFT 0
1194#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
1195#define lpfc_mbx_wq_create_bar_set_WORD word2
1196#define WQ_PCI_BAR_0_AND_1 0x00
1197#define WQ_PCI_BAR_2_AND_3 0x01
1198#define WQ_PCI_BAR_4_AND_5 0x02
1199#define lpfc_mbx_wq_create_db_format_SHIFT 16
1200#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
1201#define lpfc_mbx_wq_create_db_format_WORD word2
1163 } response; 1202 } response;
1164 } u; 1203 } u;
1165}; 1204};
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
1223#define lpfc_mbx_rq_create_num_pages_SHIFT 0 1262#define lpfc_mbx_rq_create_num_pages_SHIFT 0
1224#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF 1263#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
1225#define lpfc_mbx_rq_create_num_pages_WORD word0 1264#define lpfc_mbx_rq_create_num_pages_WORD word0
1265#define lpfc_mbx_rq_create_dua_SHIFT 16
1266#define lpfc_mbx_rq_create_dua_MASK 0x00000001
1267#define lpfc_mbx_rq_create_dua_WORD word0
1268#define lpfc_mbx_rq_create_bqu_SHIFT 17
1269#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
1270#define lpfc_mbx_rq_create_bqu_WORD word0
1271#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
1272#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
1273#define lpfc_mbx_rq_create_ulp_num_WORD word0
1226 struct rq_context context; 1274 struct rq_context context;
1227 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1275 struct dma_address page[LPFC_MAX_WQ_PAGE];
1228 } request; 1276 } request;
1229 struct { 1277 struct {
1230 uint32_t word0; 1278 uint32_t word0;
1231#define lpfc_mbx_rq_create_q_id_SHIFT 0 1279#define lpfc_mbx_rq_create_q_id_SHIFT 0
1232#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF 1280#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
1233#define lpfc_mbx_rq_create_q_id_WORD word0 1281#define lpfc_mbx_rq_create_q_id_WORD word0
1282 uint32_t doorbell_offset;
1283 uint32_t word2;
1284#define lpfc_mbx_rq_create_bar_set_SHIFT 0
1285#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
1286#define lpfc_mbx_rq_create_bar_set_WORD word2
1287#define lpfc_mbx_rq_create_db_format_SHIFT 16
1288#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
1289#define lpfc_mbx_rq_create_db_format_WORD word2
1234 } response; 1290 } response;
1235 } u; 1291 } u;
1236}; 1292};
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
1388 } u; 1444 } u;
1389}; 1445};
1390 1446
1447struct lpfc_mbx_query_fw_config {
1448 struct mbox_header header;
1449 struct {
1450 uint32_t config_number;
1451#define LPFC_FC_FCOE 0x00000007
1452 uint32_t asic_revision;
1453 uint32_t physical_port;
1454 uint32_t function_mode;
1455#define LPFC_FCOE_INI_MODE 0x00000040
1456#define LPFC_FCOE_TGT_MODE 0x00000080
1457#define LPFC_DUA_MODE 0x00000800
1458 uint32_t ulp0_mode;
1459#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
1460#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
1461 uint32_t ulp0_nap_words[12];
1462 uint32_t ulp1_mode;
1463 uint32_t ulp1_nap_words[12];
1464 uint32_t function_capabilities;
1465 uint32_t cqid_base;
1466 uint32_t cqid_tot;
1467 uint32_t eqid_base;
1468 uint32_t eqid_tot;
1469 uint32_t ulp0_nap2_words[2];
1470 uint32_t ulp1_nap2_words[2];
1471 } rsp;
1472};
1473
1391struct lpfc_id_range { 1474struct lpfc_id_range {
1392 uint32_t word5; 1475 uint32_t word5;
1393#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 1476#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
1803#define lpfc_mbx_redisc_fcf_index_WORD word12 1886#define lpfc_mbx_redisc_fcf_index_WORD word12
1804}; 1887};
1805 1888
1806struct lpfc_mbx_query_fw_cfg {
1807 struct mbox_header header;
1808 uint32_t config_number;
1809 uint32_t asic_rev;
1810 uint32_t phys_port;
1811 uint32_t function_mode;
1812/* firmware Function Mode */
1813#define lpfc_function_mode_toe_SHIFT 0
1814#define lpfc_function_mode_toe_MASK 0x00000001
1815#define lpfc_function_mode_toe_WORD function_mode
1816#define lpfc_function_mode_nic_SHIFT 1
1817#define lpfc_function_mode_nic_MASK 0x00000001
1818#define lpfc_function_mode_nic_WORD function_mode
1819#define lpfc_function_mode_rdma_SHIFT 2
1820#define lpfc_function_mode_rdma_MASK 0x00000001
1821#define lpfc_function_mode_rdma_WORD function_mode
1822#define lpfc_function_mode_vm_SHIFT 3
1823#define lpfc_function_mode_vm_MASK 0x00000001
1824#define lpfc_function_mode_vm_WORD function_mode
1825#define lpfc_function_mode_iscsi_i_SHIFT 4
1826#define lpfc_function_mode_iscsi_i_MASK 0x00000001
1827#define lpfc_function_mode_iscsi_i_WORD function_mode
1828#define lpfc_function_mode_iscsi_t_SHIFT 5
1829#define lpfc_function_mode_iscsi_t_MASK 0x00000001
1830#define lpfc_function_mode_iscsi_t_WORD function_mode
1831#define lpfc_function_mode_fcoe_i_SHIFT 6
1832#define lpfc_function_mode_fcoe_i_MASK 0x00000001
1833#define lpfc_function_mode_fcoe_i_WORD function_mode
1834#define lpfc_function_mode_fcoe_t_SHIFT 7
1835#define lpfc_function_mode_fcoe_t_MASK 0x00000001
1836#define lpfc_function_mode_fcoe_t_WORD function_mode
1837#define lpfc_function_mode_dal_SHIFT 8
1838#define lpfc_function_mode_dal_MASK 0x00000001
1839#define lpfc_function_mode_dal_WORD function_mode
1840#define lpfc_function_mode_lro_SHIFT 9
1841#define lpfc_function_mode_lro_MASK 0x00000001
1842#define lpfc_function_mode_lro_WORD function_mode
1843#define lpfc_function_mode_flex10_SHIFT 10
1844#define lpfc_function_mode_flex10_MASK 0x00000001
1845#define lpfc_function_mode_flex10_WORD function_mode
1846#define lpfc_function_mode_ncsi_SHIFT 11
1847#define lpfc_function_mode_ncsi_MASK 0x00000001
1848#define lpfc_function_mode_ncsi_WORD function_mode
1849};
1850
1851/* Status field for embedded SLI_CONFIG mailbox command */ 1889/* Status field for embedded SLI_CONFIG mailbox command */
1852#define STATUS_SUCCESS 0x0 1890#define STATUS_SUCCESS 0x0
1853#define STATUS_FAILED 0x1 1891#define STATUS_FAILED 0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
2965 struct lpfc_mbx_read_config rd_config; 3003 struct lpfc_mbx_read_config rd_config;
2966 struct lpfc_mbx_request_features req_ftrs; 3004 struct lpfc_mbx_request_features req_ftrs;
2967 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 3005 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
2968 struct lpfc_mbx_query_fw_cfg query_fw_cfg; 3006 struct lpfc_mbx_query_fw_config query_fw_cfg;
2969 struct lpfc_mbx_supp_pages supp_pages; 3007 struct lpfc_mbx_supp_pages supp_pages;
2970 struct lpfc_mbx_pc_sli4_params sli4_params; 3008 struct lpfc_mbx_pc_sli4_params sli4_params;
2971 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; 3009 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7de4ef14698f..314b4f61b9e3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6229,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6229 phba->sli4_hba.conf_regs_memmap_p + 6229 phba->sli4_hba.conf_regs_memmap_p +
6230 LPFC_CTL_PORT_SEM_OFFSET; 6230 LPFC_CTL_PORT_SEM_OFFSET;
6231 phba->sli4_hba.RQDBregaddr = 6231 phba->sli4_hba.RQDBregaddr =
6232 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 6232 phba->sli4_hba.conf_regs_memmap_p +
6233 LPFC_ULP0_RQ_DOORBELL;
6233 phba->sli4_hba.WQDBregaddr = 6234 phba->sli4_hba.WQDBregaddr =
6234 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 6235 phba->sli4_hba.conf_regs_memmap_p +
6236 LPFC_ULP0_WQ_DOORBELL;
6235 phba->sli4_hba.EQCQDBregaddr = 6237 phba->sli4_hba.EQCQDBregaddr =
6236 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 6238 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6237 phba->sli4_hba.MQDBregaddr = 6239 phba->sli4_hba.MQDBregaddr =
@@ -6285,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6285 return -ENODEV; 6287 return -ENODEV;
6286 6288
6287 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6289 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6288 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 6290 vf * LPFC_VFR_PAGE_SIZE +
6291 LPFC_ULP0_RQ_DOORBELL);
6289 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6292 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6290 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 6293 vf * LPFC_VFR_PAGE_SIZE +
6294 LPFC_ULP0_WQ_DOORBELL);
6291 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6295 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6292 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 6296 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6293 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6297 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6983,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6983 phba->sli4_hba.fcp_wq = NULL; 6987 phba->sli4_hba.fcp_wq = NULL;
6984 } 6988 }
6985 6989
6990 if (phba->pci_bar0_memmap_p) {
6991 iounmap(phba->pci_bar0_memmap_p);
6992 phba->pci_bar0_memmap_p = NULL;
6993 }
6994 if (phba->pci_bar2_memmap_p) {
6995 iounmap(phba->pci_bar2_memmap_p);
6996 phba->pci_bar2_memmap_p = NULL;
6997 }
6998 if (phba->pci_bar4_memmap_p) {
6999 iounmap(phba->pci_bar4_memmap_p);
7000 phba->pci_bar4_memmap_p = NULL;
7001 }
7002
6986 /* Release FCP CQ mapping array */ 7003 /* Release FCP CQ mapping array */
6987 if (phba->sli4_hba.fcp_cq_map != NULL) { 7004 if (phba->sli4_hba.fcp_cq_map != NULL) {
6988 kfree(phba->sli4_hba.fcp_cq_map); 7005 kfree(phba->sli4_hba.fcp_cq_map);
@@ -7046,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7046 int rc = -ENOMEM; 7063 int rc = -ENOMEM;
7047 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7064 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7048 int fcp_cq_index = 0; 7065 int fcp_cq_index = 0;
7066 uint32_t shdr_status, shdr_add_status;
7067 union lpfc_sli4_cfg_shdr *shdr;
7068 LPFC_MBOXQ_t *mboxq;
7069 uint32_t length;
7070
7071 /* Check for dual-ULP support */
7072 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7073 if (!mboxq) {
7074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7075 "3249 Unable to allocate memory for "
7076 "QUERY_FW_CFG mailbox command\n");
7077 return -ENOMEM;
7078 }
7079 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7080 sizeof(struct lpfc_sli4_cfg_mhdr));
7081 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7082 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7083 length, LPFC_SLI4_MBX_EMBED);
7084
7085 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7086
7087 shdr = (union lpfc_sli4_cfg_shdr *)
7088 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7089 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7090 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7091 if (shdr_status || shdr_add_status || rc) {
7092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7093 "3250 QUERY_FW_CFG mailbox failed with status "
7094 "x%x add_status x%x, mbx status x%x\n",
7095 shdr_status, shdr_add_status, rc);
7096 if (rc != MBX_TIMEOUT)
7097 mempool_free(mboxq, phba->mbox_mem_pool);
7098 rc = -ENXIO;
7099 goto out_error;
7100 }
7101
7102 phba->sli4_hba.fw_func_mode =
7103 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7104 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7105 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7106 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7107 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7108 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7109 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7110
7111 if (rc != MBX_TIMEOUT)
7112 mempool_free(mboxq, phba->mbox_mem_pool);
7049 7113
7050 /* 7114 /*
7051 * Set up HBA Event Queues (EQs) 7115 * Set up HBA Event Queues (EQs)
@@ -7660,78 +7724,6 @@ out:
7660} 7724}
7661 7725
7662/** 7726/**
7663 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7664 * @phba: pointer to lpfc hba data structure.
7665 * @cnt: number of nop mailbox commands to send.
7666 *
7667 * This routine is invoked to send a number @cnt of NOP mailbox command and
7668 * wait for each command to complete.
7669 *
7670 * Return: the number of NOP mailbox command completed.
7671 **/
7672static int
7673lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7674{
7675 LPFC_MBOXQ_t *mboxq;
7676 int length, cmdsent;
7677 uint32_t mbox_tmo;
7678 uint32_t rc = 0;
7679 uint32_t shdr_status, shdr_add_status;
7680 union lpfc_sli4_cfg_shdr *shdr;
7681
7682 if (cnt == 0) {
7683 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7684 "2518 Requested to send 0 NOP mailbox cmd\n");
7685 return cnt;
7686 }
7687
7688 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7689 if (!mboxq) {
7690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7691 "2519 Unable to allocate memory for issuing "
7692 "NOP mailbox command\n");
7693 return 0;
7694 }
7695
7696 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7697 length = (sizeof(struct lpfc_mbx_nop) -
7698 sizeof(struct lpfc_sli4_cfg_mhdr));
7699
7700 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7701 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7702 LPFC_MBOX_OPCODE_NOP, length,
7703 LPFC_SLI4_MBX_EMBED);
7704 if (!phba->sli4_hba.intr_enable)
7705 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7706 else {
7707 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7708 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7709 }
7710 if (rc == MBX_TIMEOUT)
7711 break;
7712 /* Check return status */
7713 shdr = (union lpfc_sli4_cfg_shdr *)
7714 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7715 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7716 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7717 &shdr->response);
7718 if (shdr_status || shdr_add_status || rc) {
7719 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7720 "2520 NOP mailbox command failed "
7721 "status x%x add_status x%x mbx "
7722 "status x%x\n", shdr_status,
7723 shdr_add_status, rc);
7724 break;
7725 }
7726 }
7727
7728 if (rc != MBX_TIMEOUT)
7729 mempool_free(mboxq, phba->mbox_mem_pool);
7730
7731 return cmdsent;
7732}
7733
7734/**
7735 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 7727 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7736 * @phba: pointer to lpfc hba data structure. 7728 * @phba: pointer to lpfc hba data structure.
7737 * 7729 *
@@ -8499,37 +8491,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
8499} 8491}
8500 8492
8501/** 8493/**
8502 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8503 * @phba: pointer to lpfc hba data structure.
8504 *
8505 * This routine is invoked to unset the HBA device initialization steps to
8506 * a device with SLI-4 interface spec.
8507 **/
8508static void
8509lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8510{
8511 struct lpfc_vport *vport = phba->pport;
8512 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8513
8514 spin_lock_irq(shost->host_lock);
8515 vport->load_flag |= FC_UNLOADING;
8516 spin_unlock_irq(shost->host_lock);
8517
8518 phba->pport->work_port_events = 0;
8519
8520 /* Stop the SLI4 device port */
8521 lpfc_stop_port(phba);
8522
8523 lpfc_sli4_disable_intr(phba);
8524
8525 /* Reset SLI4 HBA FCoE function */
8526 lpfc_pci_function_reset(phba);
8527 lpfc_sli4_queue_destroy(phba);
8528
8529 return;
8530}
8531
8532/**
8533 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 8494 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8534 * @phba: Pointer to HBA context object. 8495 * @phba: Pointer to HBA context object.
8535 * 8496 *
@@ -9591,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9591 struct Scsi_Host *shost = NULL; 9552 struct Scsi_Host *shost = NULL;
9592 int error, ret; 9553 int error, ret;
9593 uint32_t cfg_mode, intr_mode; 9554 uint32_t cfg_mode, intr_mode;
9594 int mcnt;
9595 int adjusted_fcp_io_channel; 9555 int adjusted_fcp_io_channel;
9596 9556
9597 /* Allocate memory for HBA structure */ 9557 /* Allocate memory for HBA structure */
@@ -9680,58 +9640,35 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9680 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9640 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9681 /* Now, trying to enable interrupt and bring up the device */ 9641 /* Now, trying to enable interrupt and bring up the device */
9682 cfg_mode = phba->cfg_use_msi; 9642 cfg_mode = phba->cfg_use_msi;
9683 while (true) {
9684 /* Put device to a known state before enabling interrupt */
9685 lpfc_stop_port(phba);
9686 /* Configure and enable interrupt */
9687 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9688 if (intr_mode == LPFC_INTR_ERROR) {
9689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9690 "0426 Failed to enable interrupt.\n");
9691 error = -ENODEV;
9692 goto out_free_sysfs_attr;
9693 }
9694 /* Default to single EQ for non-MSI-X */
9695 if (phba->intr_type != MSIX)
9696 adjusted_fcp_io_channel = 1;
9697 else
9698 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9699 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9700 /* Set up SLI-4 HBA */
9701 if (lpfc_sli4_hba_setup(phba)) {
9702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9703 "1421 Failed to set up hba\n");
9704 error = -ENODEV;
9705 goto out_disable_intr;
9706 }
9707 9643
9708 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9644 /* Put device to a known state before enabling interrupt */
9709 if (intr_mode != 0) 9645 lpfc_stop_port(phba);
9710 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9646 /* Configure and enable interrupt */
9711 LPFC_ACT_INTR_CNT); 9647 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9712 9648 if (intr_mode == LPFC_INTR_ERROR) {
9713 /* Check active interrupts received only for MSI/MSI-X */ 9649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9714 if (intr_mode == 0 || 9650 "0426 Failed to enable interrupt.\n");
9715 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9651 error = -ENODEV;
9716 /* Log the current active interrupt mode */ 9652 goto out_free_sysfs_attr;
9717 phba->intr_mode = intr_mode; 9653 }
9718 lpfc_log_intr_mode(phba, intr_mode); 9654 /* Default to single EQ for non-MSI-X */
9719 break; 9655 if (phba->intr_type != MSIX)
9720 } 9656 adjusted_fcp_io_channel = 1;
9721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9657 else
9722 "0451 Configure interrupt mode (%d) " 9658 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9723 "failed active interrupt test.\n", 9659 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9724 intr_mode); 9660 /* Set up SLI-4 HBA */
9725 /* Unset the previous SLI-4 HBA setup. */ 9661 if (lpfc_sli4_hba_setup(phba)) {
9726 /* 9662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9727 * TODO: Is this operation compatible with IF TYPE 2 9663 "1421 Failed to set up hba\n");
9728 * devices? All port state is deleted and cleared. 9664 error = -ENODEV;
9729 */ 9665 goto out_disable_intr;
9730 lpfc_sli4_unset_hba(phba);
9731 /* Try next level of interrupt mode */
9732 cfg_mode = --intr_mode;
9733 } 9666 }
9734 9667
9668 /* Log the current active interrupt mode */
9669 phba->intr_mode = intr_mode;
9670 lpfc_log_intr_mode(phba, intr_mode);
9671
9735 /* Perform post initialization setup */ 9672 /* Perform post initialization setup */
9736 lpfc_post_init_setup(phba); 9673 lpfc_post_init_setup(phba);
9737 9674
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d8fadcb2db73..46128c679202 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1115,6 +1115,13 @@ out:
1115 "0261 Cannot Register NameServer login\n"); 1115 "0261 Cannot Register NameServer login\n");
1116 } 1116 }
1117 1117
1118 /*
1119 ** In case the node reference counter does not go to zero, ensure that
1120 ** the stale state for the node is not processed.
1121 */
1122
1123 ndlp->nlp_prev_state = ndlp->nlp_state;
1124 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1118 spin_lock_irq(shost->host_lock); 1125 spin_lock_irq(shost->host_lock);
1119 ndlp->nlp_flag |= NLP_DEFER_RM; 1126 ndlp->nlp_flag |= NLP_DEFER_RM;
1120 spin_unlock_irq(shost->host_lock); 1127 spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2159{ 2166{
2160 struct lpfc_iocbq *cmdiocb, *rspiocb; 2167 struct lpfc_iocbq *cmdiocb, *rspiocb;
2161 IOCB_t *irsp; 2168 IOCB_t *irsp;
2169 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2162 2170
2163 cmdiocb = (struct lpfc_iocbq *) arg; 2171 cmdiocb = (struct lpfc_iocbq *) arg;
2164 rspiocb = cmdiocb->context_un.rsp_iocb; 2172 rspiocb = cmdiocb->context_un.rsp_iocb;
2165 2173
2166 irsp = &rspiocb->iocb; 2174 irsp = &rspiocb->iocb;
2167 if (irsp->ulpStatus) { 2175 if (irsp->ulpStatus) {
2176 spin_lock_irq(shost->host_lock);
2168 ndlp->nlp_flag |= NLP_DEFER_RM; 2177 ndlp->nlp_flag |= NLP_DEFER_RM;
2178 spin_unlock_irq(shost->host_lock);
2169 return NLP_STE_FREED_NODE; 2179 return NLP_STE_FREED_NODE;
2170 } 2180 }
2171 return ndlp->nlp_state; 2181 return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 60e5a177644c..98af07c6e300 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -288,6 +288,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
288} 288}
289 289
290/** 290/**
291 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
292 * @sdev: Pointer the scsi device whose queue depth is to change
293 * @tag_type: Identifier for queue tag type
294 */
295static int
296lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
297{
298 if (sdev->tagged_supported) {
299 scsi_set_tag_type(sdev, tag_type);
300 if (tag_type)
301 scsi_activate_tcq(sdev, sdev->queue_depth);
302 else
303 scsi_deactivate_tcq(sdev, sdev->queue_depth);
304 } else
305 tag_type = 0;
306
307 return tag_type;
308}
309
310/**
291 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 311 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
292 * @phba: The Hba for which this call is being executed. 312 * @phba: The Hba for which this call is being executed.
293 * 313 *
@@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3972 break; 3992 break;
3973 } 3993 }
3974 } else 3994 } else
3975 fcp_cmnd->fcpCntl1 = 0; 3995 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3976 3996
3977 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 3997 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3978 3998
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
5150 .max_sectors = 0xFFFF, 5170 .max_sectors = 0xFFFF,
5151 .vendor_id = LPFC_NL_VENDOR_ID, 5171 .vendor_id = LPFC_NL_VENDOR_ID,
5152 .change_queue_depth = lpfc_change_queue_depth, 5172 .change_queue_depth = lpfc_change_queue_depth,
5173 .change_queue_type = lpfc_change_queue_type,
5153}; 5174};
5154 5175
5155struct scsi_host_template lpfc_vport_template = { 5176struct scsi_host_template lpfc_vport_template = {
@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
5172 .shost_attrs = lpfc_vport_attrs, 5193 .shost_attrs = lpfc_vport_attrs,
5173 .max_sectors = 0xFFFF, 5194 .max_sectors = 0xFFFF,
5174 .change_queue_depth = lpfc_change_queue_depth, 5195 .change_queue_depth = lpfc_change_queue_depth,
5196 .change_queue_type = lpfc_change_queue_type,
5175}; 5197};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 624eab370396..55b6fc83ad71 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
124 124
125 /* Ring Doorbell */ 125 /* Ring Doorbell */
126 doorbell.word0 = 0; 126 doorbell.word0 = 0;
127 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); 127 if (q->db_format == LPFC_DB_LIST_FORMAT) {
128 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); 128 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
129 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); 129 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
130 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); 130 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
131 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
132 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
133 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
134 } else {
135 return -EINVAL;
136 }
137 writel(doorbell.word0, q->db_regaddr);
131 138
132 return 0; 139 return 0;
133} 140}
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
456 /* Ring The Header Receive Queue Doorbell */ 463 /* Ring The Header Receive Queue Doorbell */
457 if (!(hq->host_index % hq->entry_repost)) { 464 if (!(hq->host_index % hq->entry_repost)) {
458 doorbell.word0 = 0; 465 doorbell.word0 = 0;
459 bf_set(lpfc_rq_doorbell_num_posted, &doorbell, 466 if (hq->db_format == LPFC_DB_RING_FORMAT) {
460 hq->entry_repost); 467 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
461 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); 468 hq->entry_repost);
462 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); 469 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
470 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
471 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
472 hq->entry_repost);
473 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
474 hq->host_index);
475 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
476 } else {
477 return -EINVAL;
478 }
479 writel(doorbell.word0, hq->db_regaddr);
463 } 480 }
464 return put_index; 481 return put_index;
465} 482}
@@ -4939,7 +4956,7 @@ out_free_mboxq:
4939static void 4956static void
4940lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4957lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4941{ 4958{
4942 uint8_t fcp_eqidx; 4959 int fcp_eqidx;
4943 4960
4944 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4961 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4945 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4962 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5622 } 5639 }
5623 /* RPIs. */ 5640 /* RPIs. */
5624 count = phba->sli4_hba.max_cfg_param.max_rpi; 5641 count = phba->sli4_hba.max_cfg_param.max_rpi;
5642 if (count <= 0) {
5643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5644 "3279 Invalid provisioning of "
5645 "rpi:%d\n", count);
5646 rc = -EINVAL;
5647 goto err_exit;
5648 }
5625 base = phba->sli4_hba.max_cfg_param.rpi_base; 5649 base = phba->sli4_hba.max_cfg_param.rpi_base;
5626 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5650 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5627 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5651 phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5644 5668
5645 /* VPIs. */ 5669 /* VPIs. */
5646 count = phba->sli4_hba.max_cfg_param.max_vpi; 5670 count = phba->sli4_hba.max_cfg_param.max_vpi;
5671 if (count <= 0) {
5672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5673 "3280 Invalid provisioning of "
5674 "vpi:%d\n", count);
5675 rc = -EINVAL;
5676 goto free_rpi_ids;
5677 }
5647 base = phba->sli4_hba.max_cfg_param.vpi_base; 5678 base = phba->sli4_hba.max_cfg_param.vpi_base;
5648 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5679 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5649 phba->vpi_bmask = kzalloc(longs * 5680 phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5666 5697
5667 /* XRIs. */ 5698 /* XRIs. */
5668 count = phba->sli4_hba.max_cfg_param.max_xri; 5699 count = phba->sli4_hba.max_cfg_param.max_xri;
5700 if (count <= 0) {
5701 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5702 "3281 Invalid provisioning of "
5703 "xri:%d\n", count);
5704 rc = -EINVAL;
5705 goto free_vpi_ids;
5706 }
5669 base = phba->sli4_hba.max_cfg_param.xri_base; 5707 base = phba->sli4_hba.max_cfg_param.xri_base;
5670 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5708 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5671 phba->sli4_hba.xri_bmask = kzalloc(longs * 5709 phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5689 5727
5690 /* VFIs. */ 5728 /* VFIs. */
5691 count = phba->sli4_hba.max_cfg_param.max_vfi; 5729 count = phba->sli4_hba.max_cfg_param.max_vfi;
5730 if (count <= 0) {
5731 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5732 "3282 Invalid provisioning of "
5733 "vfi:%d\n", count);
5734 rc = -EINVAL;
5735 goto free_xri_ids;
5736 }
5692 base = phba->sli4_hba.max_cfg_param.vfi_base; 5737 base = phba->sli4_hba.max_cfg_param.vfi_base;
5693 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5738 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5694 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5739 phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8370 * This is a continuation of a commandi,(CX) so this 8415 * This is a continuation of a commandi,(CX) so this
8371 * sglq is on the active list 8416 * sglq is on the active list
8372 */ 8417 */
8373 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 8418 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8374 if (!sglq) 8419 if (!sglq)
8375 return IOCB_ERROR; 8420 return IOCB_ERROR;
8376 } 8421 }
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8855 pring->prt[3].type = FC_TYPE_CT; 8900 pring->prt[3].type = FC_TYPE_CT;
8856 pring->prt[3].lpfc_sli_rcv_unsol_event = 8901 pring->prt[3].lpfc_sli_rcv_unsol_event =
8857 lpfc_ct_unsol_event; 8902 lpfc_ct_unsol_event;
8858 /* abort unsolicited sequence */
8859 pring->prt[4].profile = 0; /* Mask 4 */
8860 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8861 pring->prt[4].type = FC_TYPE_BLS;
8862 pring->prt[4].lpfc_sli_rcv_unsol_event =
8863 lpfc_sli4_ct_abort_unsol_event;
8864 break; 8903 break;
8865 } 8904 }
8866 totiocbsize += (pring->sli.sli3.numCiocb * 8905 totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11873 struct lpfc_eqe *eqe; 11912 struct lpfc_eqe *eqe;
11874 unsigned long iflag; 11913 unsigned long iflag;
11875 int ecount = 0; 11914 int ecount = 0;
11876 uint32_t fcp_eqidx; 11915 int fcp_eqidx;
11877 11916
11878 /* Get the driver's phba structure from the dev_id */ 11917 /* Get the driver's phba structure from the dev_id */
11879 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11918 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
11975 struct lpfc_hba *phba; 12014 struct lpfc_hba *phba;
11976 irqreturn_t hba_irq_rc; 12015 irqreturn_t hba_irq_rc;
11977 bool hba_handled = false; 12016 bool hba_handled = false;
11978 uint32_t fcp_eqidx; 12017 int fcp_eqidx;
11979 12018
11980 /* Get the driver's phba structure from the dev_id */ 12019 /* Get the driver's phba structure from the dev_id */
11981 phba = (struct lpfc_hba *)dev_id; 12020 phba = (struct lpfc_hba *)dev_id;
@@ -12097,6 +12136,54 @@ out_fail:
12097} 12136}
12098 12137
12099/** 12138/**
12139 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
12140 * @phba: HBA structure that indicates port to create a queue on.
12141 * @pci_barset: PCI BAR set flag.
12142 *
12143 * This function performs an iomap of the specified PCI BAR into host
12144 * memory, if it has not already been mapped, and returns the mapped
12145 * address. The returned host memory address may be NULL.
12146 */
12147static void __iomem *
12148lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12149{
12150 struct pci_dev *pdev;
12151 unsigned long bar_map, bar_map_len;
12152
12153 if (!phba->pcidev)
12154 return NULL;
12155 else
12156 pdev = phba->pcidev;
12157
12158 switch (pci_barset) {
12159 case WQ_PCI_BAR_0_AND_1:
12160 if (!phba->pci_bar0_memmap_p) {
12161 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12162 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12163 phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12164 }
12165 return phba->pci_bar0_memmap_p;
12166 case WQ_PCI_BAR_2_AND_3:
12167 if (!phba->pci_bar2_memmap_p) {
12168 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12169 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12170 phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12171 }
12172 return phba->pci_bar2_memmap_p;
12173 case WQ_PCI_BAR_4_AND_5:
12174 if (!phba->pci_bar4_memmap_p) {
12175 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12176 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12177 phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12178 }
12179 return phba->pci_bar4_memmap_p;
12180 default:
12181 break;
12182 }
12183 return NULL;
12184}
12185
12186/**
12100 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12187 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12101 * @phba: HBA structure that indicates port to create a queue on. 12188 * @phba: HBA structure that indicates port to create a queue on.
12102 * @startq: The starting FCP EQ to modify 12189 * @startq: The starting FCP EQ to modify
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12673 union lpfc_sli4_cfg_shdr *shdr; 12760 union lpfc_sli4_cfg_shdr *shdr;
12674 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12761 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12675 struct dma_address *page; 12762 struct dma_address *page;
12763 void __iomem *bar_memmap_p;
12764 uint32_t db_offset;
12765 uint16_t pci_barset;
12676 12766
12677 /* sanity check on queue memory */ 12767 /* sanity check on queue memory */
12678 if (!wq || !cq) 12768 if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12696 cq->queue_id); 12786 cq->queue_id);
12697 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12787 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12698 phba->sli4_hba.pc_sli4_params.wqv); 12788 phba->sli4_hba.pc_sli4_params.wqv);
12789
12699 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12790 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12700 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12791 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12701 wq->entry_count); 12792 wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12723 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12814 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12724 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 12815 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12725 } 12816 }
12817
12818 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
12819 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
12820
12726 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12727 /* The IOCTL status is embedded in the mailbox subheader. */ 12822 /* The IOCTL status is embedded in the mailbox subheader. */
12728 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12823 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12740 status = -ENXIO; 12835 status = -ENXIO;
12741 goto out; 12836 goto out;
12742 } 12837 }
12838 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
12839 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
12840 &wq_create->u.response);
12841 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
12842 (wq->db_format != LPFC_DB_RING_FORMAT)) {
12843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12844 "3265 WQ[%d] doorbell format not "
12845 "supported: x%x\n", wq->queue_id,
12846 wq->db_format);
12847 status = -EINVAL;
12848 goto out;
12849 }
12850 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
12851 &wq_create->u.response);
12852 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
12853 if (!bar_memmap_p) {
12854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12855 "3263 WQ[%d] failed to memmap pci "
12856 "barset:x%x\n", wq->queue_id,
12857 pci_barset);
12858 status = -ENOMEM;
12859 goto out;
12860 }
12861 db_offset = wq_create->u.response.doorbell_offset;
12862 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
12863 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
12864 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12865 "3252 WQ[%d] doorbell offset not "
12866 "supported: x%x\n", wq->queue_id,
12867 db_offset);
12868 status = -EINVAL;
12869 goto out;
12870 }
12871 wq->db_regaddr = bar_memmap_p + db_offset;
12872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12873 "3264 WQ[%d]: barset:x%x, offset:x%x\n",
12874 wq->queue_id, pci_barset, db_offset);
12875 } else {
12876 wq->db_format = LPFC_DB_LIST_FORMAT;
12877 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
12878 }
12743 wq->type = LPFC_WQ; 12879 wq->type = LPFC_WQ;
12744 wq->assoc_qid = cq->queue_id; 12880 wq->assoc_qid = cq->queue_id;
12745 wq->subtype = subtype; 12881 wq->subtype = subtype;
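Editor note: with the dual-chute (DUA) path above, wq->db_regaddr now points at either the ULP0 or ULP1 work-queue doorbell inside the BAR set reported by the firmware, instead of the shared WQDBregaddr. A hedged sketch of how such a per-queue doorbell address is consumed when entries are posted (driver context assumed; the doorbell word layout itself lives in the driver headers and is not reproduced here):

    /* Sketch: ring the work queue doorbell resolved at WQ-create time. */
    static void example_wq_ring_doorbell(struct lpfc_queue *q, u32 doorbell_word0)
    {
            writel(doorbell_word0, q->db_regaddr);
    }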
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12816 uint32_t shdr_status, shdr_add_status; 12952 uint32_t shdr_status, shdr_add_status;
12817 union lpfc_sli4_cfg_shdr *shdr; 12953 union lpfc_sli4_cfg_shdr *shdr;
12818 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12954 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12955 void __iomem *bar_memmap_p;
12956 uint32_t db_offset;
12957 uint16_t pci_barset;
12819 12958
12820 /* sanity check on queue memory */ 12959 /* sanity check on queue memory */
12821 if (!hrq || !drq || !cq) 12960 if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12894 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13033 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12895 putPaddrHigh(dmabuf->phys); 13034 putPaddrHigh(dmabuf->phys);
12896 } 13035 }
13036 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13037 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13038
12897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13039 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12898 /* The IOCTL status is embedded in the mailbox subheader. */ 13040 /* The IOCTL status is embedded in the mailbox subheader. */
12899 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13041 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12911 status = -ENXIO; 13053 status = -ENXIO;
12912 goto out; 13054 goto out;
12913 } 13055 }
13056
13057 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13058 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13059 &rq_create->u.response);
13060 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13061 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13063 "3262 RQ [%d] doorbell format not "
13064 "supported: x%x\n", hrq->queue_id,
13065 hrq->db_format);
13066 status = -EINVAL;
13067 goto out;
13068 }
13069
13070 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13071 &rq_create->u.response);
13072 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13073 if (!bar_memmap_p) {
13074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13075 "3269 RQ[%d] failed to memmap pci "
13076 "barset:x%x\n", hrq->queue_id,
13077 pci_barset);
13078 status = -ENOMEM;
13079 goto out;
13080 }
13081
13082 db_offset = rq_create->u.response.doorbell_offset;
13083 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13084 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13086 "3270 RQ[%d] doorbell offset not "
13087 "supported: x%x\n", hrq->queue_id,
13088 db_offset);
13089 status = -EINVAL;
13090 goto out;
13091 }
13092 hrq->db_regaddr = bar_memmap_p + db_offset;
13093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13094 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
13095 hrq->queue_id, pci_barset, db_offset);
13096 } else {
13097 hrq->db_format = LPFC_DB_RING_FORMAT;
13098 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
13099 }
12914 hrq->type = LPFC_HRQ; 13100 hrq->type = LPFC_HRQ;
12915 hrq->assoc_qid = cq->queue_id; 13101 hrq->assoc_qid = cq->queue_id;
12916 hrq->subtype = subtype; 13102 hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12976 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13162 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12977 putPaddrHigh(dmabuf->phys); 13163 putPaddrHigh(dmabuf->phys);
12978 } 13164 }
13165 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13166 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
12979 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13167 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12980 /* The IOCTL status is embedded in the mailbox subheader. */ 13168 /* The IOCTL status is embedded in the mailbox subheader. */
12981 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13169 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14063,6 +14251,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14063} 14251}
14064 14252
14065/** 14253/**
14254 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14255 * @vport: pointer to a virtual port
14256 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14257 *
14258 * This function tries to abort the sequence assembled at the upper level
14259 * protocol, described by the information in the basic abort @dmabuf. It
14260 * checks whether such a pending context exists at the upper level protocol
14261 * and, if so, cleans up that pending context.
14262 *
14263 * Return
14264 * true -- if a matching pending context of the sequence was found and
14265 * cleaned up at the ulp;
14266 * false -- if no matching pending context of the sequence is present
14267 * at the ulp.
14268 **/
14269static bool
14270lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14271{
14272 struct lpfc_hba *phba = vport->phba;
14273 int handled;
14274
14275 /* Accepting abort at ulp with SLI4 only */
14276 if (phba->sli_rev < LPFC_SLI_REV4)
14277 return false;
14278
14279 /* Let each interested upper level protocol handle the abort */
14280 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14281 if (handled)
14282 return true;
14283
14284 return false;
14285}
14286
14287/**
14066 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14288 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14067 * @phba: Pointer to HBA context object. 14289 * @phba: Pointer to HBA context object.
14068 * @cmd_iocbq: pointer to the command iocbq structure. 14290 * @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14077 struct lpfc_iocbq *cmd_iocbq, 14299 struct lpfc_iocbq *cmd_iocbq,
14078 struct lpfc_iocbq *rsp_iocbq) 14300 struct lpfc_iocbq *rsp_iocbq)
14079{ 14301{
14080 if (cmd_iocbq) 14302 struct lpfc_nodelist *ndlp;
14303
14304 if (cmd_iocbq) {
14305 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14306 lpfc_nlp_put(ndlp);
14307 lpfc_nlp_not_used(ndlp);
14081 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14308 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14309 }
14082 14310
14083 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14311 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14084 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14312 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14118 * event after aborting the sequence handling. 14346 * event after aborting the sequence handling.
14119 **/ 14347 **/
14120static void 14348static void
14121lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 14349lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14122 struct fc_frame_header *fc_hdr) 14350 struct fc_frame_header *fc_hdr, bool aborted)
14123{ 14351{
14352 struct lpfc_hba *phba = vport->phba;
14124 struct lpfc_iocbq *ctiocb = NULL; 14353 struct lpfc_iocbq *ctiocb = NULL;
14125 struct lpfc_nodelist *ndlp; 14354 struct lpfc_nodelist *ndlp;
14126 uint16_t oxid, rxid, xri, lxri; 14355 uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14135 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14364 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
14136 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14365 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14137 14366
14138 ndlp = lpfc_findnode_did(phba->pport, sid); 14367 ndlp = lpfc_findnode_did(vport, sid);
14139 if (!ndlp) { 14368 if (!ndlp) {
14140 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14369 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14141 "1268 Find ndlp returned NULL for oxid:x%x " 14370 if (!ndlp) {
14142 "SID:x%x\n", oxid, sid); 14371 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14143 return; 14372 "1268 Failed to allocate ndlp for "
14373 "oxid:x%x SID:x%x\n", oxid, sid);
14374 return;
14375 }
14376 lpfc_nlp_init(vport, ndlp, sid);
14377 /* Put ndlp onto pport node list */
14378 lpfc_enqueue_node(vport, ndlp);
14379 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14380 /* re-setup ndlp without removing from node list */
14381 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14382 if (!ndlp) {
14383 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14384 "3275 Failed to active ndlp found "
14385 "for oxid:x%x SID:x%x\n", oxid, sid);
14386 return;
14387 }
14144 } 14388 }
14145 14389
14146 /* Allocate buffer for rsp iocb */ 14390 /* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14164 icmd->ulpLe = 1; 14408 icmd->ulpLe = 1;
14165 icmd->ulpClass = CLASS3; 14409 icmd->ulpClass = CLASS3;
14166 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14410 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
14167 ctiocb->context1 = ndlp; 14411 ctiocb->context1 = lpfc_nlp_get(ndlp);
14168 14412
14169 ctiocb->iocb_cmpl = NULL; 14413 ctiocb->iocb_cmpl = NULL;
14170 ctiocb->vport = phba->pport; 14414 ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14183 if (lxri != NO_XRI) 14427 if (lxri != NO_XRI)
14184 lpfc_set_rrq_active(phba, ndlp, lxri, 14428 lpfc_set_rrq_active(phba, ndlp, lxri,
14185 (xri == oxid) ? rxid : oxid, 0); 14429 (xri == oxid) ? rxid : oxid, 0);
14186 /* If the oxid maps to the FCP XRI range or if it is out of range, 14430 /* For BA_ABTS from exchange responder, if the logical xri with
14187 * send a BLS_RJT. The driver no longer has that exchange. 14431 * the oxid maps to the FCP XRI range, the port no longer has
14188 * Override the IOCB for a BA_RJT. 14432 * that exchange context, send a BLS_RJT. Override the IOCB for
14433 * a BA_RJT.
14189 */ 14434 */
14190 if (xri > (phba->sli4_hba.max_cfg_param.max_xri + 14435 if ((fctl & FC_FC_EX_CTX) &&
14191 phba->sli4_hba.max_cfg_param.xri_base) || 14436 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14192 xri > (lpfc_sli4_get_els_iocb_cnt(phba) + 14437 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14193 phba->sli4_hba.max_cfg_param.xri_base)) { 14438 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14439 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14440 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14441 }
14442
14443 /* If BA_ABTS failed to abort a partially assembled receive sequence,
14444 * the driver no longer has that exchange, send a BLS_RJT. Override
14445 * the IOCB for a BA_RJT.
14446 */
14447 if (aborted == false) {
14194 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14448 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14195 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14449 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14196 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14450 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14214 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14468 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14215 14469
14216 /* Xmit CT abts response on exchange <xid> */ 14470 /* Xmit CT abts response on exchange <xid> */
14217 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14471 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14218 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14472 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14219 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14473 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14220 14474
14221 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14475 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14222 if (rc == IOCB_ERROR) { 14476 if (rc == IOCB_ERROR) {
14223 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 14477 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14224 "2925 Failed to issue CT ABTS RSP x%x on " 14478 "2925 Failed to issue CT ABTS RSP x%x on "
14225 "xri x%x, Data x%x\n", 14479 "xri x%x, Data x%x\n",
14226 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14480 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14227 phba->link_state); 14481 phba->link_state);
14482 lpfc_nlp_put(ndlp);
14483 ctiocb->context1 = NULL;
14228 lpfc_sli_release_iocbq(phba, ctiocb); 14484 lpfc_sli_release_iocbq(phba, ctiocb);
14229 } 14485 }
14230} 14486}
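Editor note: taken together, the completion-handler hunk further up and the issue path here balance the node reference count: lpfc_nlp_get() pins the ndlp when it is stored in context1, and lpfc_nlp_put() releases it either in lpfc_sli4_seq_abort_rsp_cmpl() or on the issue-failure path. A condensed, hedged outline of that pattern (logging omitted):

    /* Sketch: hold a node reference for the lifetime of the BLS response iocb. */
    ctiocb->context1 = lpfc_nlp_get(ndlp);          /* take the reference */
    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0) == IOCB_ERROR) {
            lpfc_nlp_put(ndlp);                     /* issue failed: drop it now */
            ctiocb->context1 = NULL;
            lpfc_sli_release_iocbq(phba, ctiocb);
    }
    /* otherwise the completion handler drops the reference */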
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14249 struct lpfc_hba *phba = vport->phba; 14505 struct lpfc_hba *phba = vport->phba;
14250 struct fc_frame_header fc_hdr; 14506 struct fc_frame_header fc_hdr;
14251 uint32_t fctl; 14507 uint32_t fctl;
14252 bool abts_par; 14508 bool aborted;
14253 14509
14254 /* Make a copy of fc_hdr before the dmabuf being released */ 14510 /* Make a copy of fc_hdr before the dmabuf being released */
14255 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 14511 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14256 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 14512 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14257 14513
14258 if (fctl & FC_FC_EX_CTX) { 14514 if (fctl & FC_FC_EX_CTX) {
14259 /* 14515 /* ABTS by responder to exchange, no cleanup needed */
14260 * ABTS sent by responder to exchange, just free the buffer 14516 aborted = true;
14261 */
14262 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14263 } else { 14517 } else {
14264 /* 14518 /* ABTS by initiator to exchange, need to do cleanup */
14265 * ABTS sent by initiator to exchange, need to do cleanup 14519 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14266 */ 14520 if (aborted == false)
14267 /* Try to abort partially assembled seq */ 14521 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
14268 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14269
14270 /* Send abort to ULP if partially seq abort failed */
14271 if (abts_par == false)
14272 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14273 else
14274 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14275 } 14522 }
14276 /* Send basic accept (BA_ACC) to the abort requester */ 14523 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14277 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 14524
14525 /* Respond with BA_ACC or BA_RJT accordingly */
14526 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14278} 14527}
14279 14528
14280/** 14529/**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15307{ 15556{
15308 uint16_t next_fcf_index; 15557 uint16_t next_fcf_index;
15309 15558
15559initial_priority:
15310 /* Search start from next bit of currently registered FCF index */ 15560 /* Search start from next bit of currently registered FCF index */
15561 next_fcf_index = phba->fcf.current_rec.fcf_indx;
15562
15311next_priority: 15563next_priority:
15312 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 15564 /* Determine the next fcf index to check */
15313 LPFC_SLI4_FCF_TBL_INDX_MAX; 15565 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
15314 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15566 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15315 LPFC_SLI4_FCF_TBL_INDX_MAX, 15567 LPFC_SLI4_FCF_TBL_INDX_MAX,
15316 next_fcf_index); 15568 next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
15337 * at that level and continue the selection process. 15589 * at that level and continue the selection process.
15338 */ 15590 */
15339 if (lpfc_check_next_fcf_pri_level(phba)) 15591 if (lpfc_check_next_fcf_pri_level(phba))
15340 goto next_priority; 15592 goto initial_priority;
15341 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15593 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15342 "2844 No roundrobin failover FCF available\n"); 15594 "2844 No roundrobin failover FCF available\n");
15343 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15595 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
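Editor note: the reworked search starts from the currently registered FCF index, advances modulo LPFC_SLI4_FCF_TBL_INDX_MAX, and re-enters at initial_priority when a new priority level is selected, so the scan restarts from the current record rather than looping on next_priority. A generic, hedged sketch of the underlying wrap-around round-robin scan (not the driver's function, which additionally re-evaluates FCF priority levels):

    /* Sketch: pick the next set bit after 'cur', wrapping around once. */
    #include <linux/bitops.h>

    static int rr_next_index(const unsigned long *bmask, unsigned int max,
                             unsigned int cur)
    {
            unsigned int next = find_next_bit(bmask, max, (cur + 1) % max);

            if (next >= max)                /* nothing above cur: wrap to 0 */
                    next = find_next_bit(bmask, max, 0);
            return (next >= max) ? -1 : (int)next;  /* -1: bitmask is empty */
    }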
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44c427a45d66..be02b59ea279 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -139,6 +139,10 @@ struct lpfc_queue {
139 139
140 struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ 140 struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
141 141
142 uint16_t db_format;
143#define LPFC_DB_RING_FORMAT 0x01
144#define LPFC_DB_LIST_FORMAT 0x02
145 void __iomem *db_regaddr;
142 /* For q stats */ 146 /* For q stats */
143 uint32_t q_cnt_1; 147 uint32_t q_cnt_1;
144 uint32_t q_cnt_2; 148 uint32_t q_cnt_2;
@@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
508 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 512 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
509 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 513 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
510 514
515 uint8_t fw_func_mode; /* FW function protocol mode */
516 uint32_t ulp0_mode; /* ULP0 protocol mode */
517 uint32_t ulp1_mode; /* ULP1 protocol mode */
518
511 /* Setup information for various queue parameters */ 519 /* Setup information for various queue parameters */
512 int eq_esize; 520 int eq_esize;
513 int eq_ecount; 521 int eq_ecount;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ba596e854bbc..f3b7795a296b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.36" 21#define LPFC_DRIVER_VERSION "8.3.37"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */