aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorJames Smart <james.smart@emulex.com>2011-05-24 11:44:12 -0400
committerJames Bottomley <jbottomley@parallels.com>2011-05-26 23:49:38 -0400
commit6d368e532168cb621731b3936945cd910cb25bd0 (patch)
tree6d49d2dea91c6637ab4cf38b61ec3a0eecc0bdfb /drivers/scsi
parent52d5244096017bbd11164479116baceaede342b0 (diff)
[SCSI] lpfc 8.3.24: Add resource extent support
This patch adds support for hardware that returns resource ids via extents rather than contiguous ranges. [jejb: checkpatch.pl fixes] Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com> Signed-off-by: James Smart <james.smart@emulex.com> Signed-off-by: James Bottomley <jbottomley@parallels.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h205
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c127
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1523
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h27
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
17 files changed, 1937 insertions, 276 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9d0bfba5461e..dfd9ace862e7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -780,6 +780,9 @@ struct lpfc_hba {
780 uint16_t vpi_base; 780 uint16_t vpi_base;
781 uint16_t vfi_base; 781 uint16_t vfi_base;
782 unsigned long *vpi_bmask; /* vpi allocation table */ 782 unsigned long *vpi_bmask; /* vpi allocation table */
783 uint16_t *vpi_ids;
784 uint16_t vpi_count;
785 struct list_head lpfc_vpi_blk_list;
783 786
784 /* Data structure used by fabric iocb scheduler */ 787 /* Data structure used by fabric iocb scheduler */
785 struct list_head fabric_iocb_list; 788 struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..080187b0e701 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
332 cmd->ulpLe = 1; 332 cmd->ulpLe = 1;
333 cmd->ulpClass = CLASS3; 333 cmd->ulpClass = CLASS3;
334 cmd->ulpContext = ndlp->nlp_rpi; 334 cmd->ulpContext = ndlp->nlp_rpi;
335 if (phba->sli_rev == LPFC_SLI_REV4)
336 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
335 cmd->ulpOwner = OWN_CHIP; 337 cmd->ulpOwner = OWN_CHIP;
336 cmdiocbq->vport = phba->pport; 338 cmdiocbq->vport = phba->pport;
337 cmdiocbq->context3 = bmp; 339 cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1336 } 1338 }
1337 1339
1338 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344
1339 /* The exchange is done, mark the entry as invalid */ 1345 /* The exchange is done, mark the entry as invalid */
1340 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1346 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1341 } else 1347 } else
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0b63cb2610d0..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
55void lpfc_supported_pages(struct lpfcMboxq *); 55void lpfc_supported_pages(struct lpfcMboxq *);
56void lpfc_pc_sli4_params(struct lpfcMboxq *); 56void lpfc_pc_sli4_params(struct lpfcMboxq *);
57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
58int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
59 uint16_t, uint16_t, bool);
58int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); 60int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
59struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 61struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
60void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); 62void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -366,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
366 uint32_t, uint32_t); 368 uint32_t, uint32_t);
367extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 369extern struct lpfc_hbq_init *lpfc_hbq_defs[];
368 370
371/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
374
369/* externs BlockGuard */ 375/* externs BlockGuard */
370extern char *_dump_buf_data; 376extern char *_dump_buf_data;
371extern unsigned long _dump_buf_data_order; 377extern unsigned long _dump_buf_data_order;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
352 icmd->ulpLe = 1; 352 icmd->ulpLe = 1;
353 icmd->ulpClass = CLASS3; 353 icmd->ulpClass = CLASS3;
354 icmd->ulpContext = ndlp->nlp_rpi; 354 icmd->ulpContext = ndlp->nlp_rpi;
355 if (phba->sli_rev == LPFC_SLI_REV4)
356 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
355 357
356 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 358 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
357 /* For GEN_REQUEST64_CR, use the RPI */ 359 /* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
250 icmd->un.elsreq64.myID = vport->fc_myDID; 250 icmd->un.elsreq64.myID = vport->fc_myDID;
251 251
252 /* For ELS_REQUEST64_CR, use the VPI by default */ 252 /* For ELS_REQUEST64_CR, use the VPI by default */
253 icmd->ulpContext = vport->vpi + phba->vpi_base; 253 icmd->ulpContext = phba->vpi_ids[vport->vpi];
254 icmd->ulpCt_h = 0; 254 icmd->ulpCt_h = 0;
255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
256 if (elscmd == ELS_CMD_ECHO) 256 if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
454 rc = -ENOMEM; 454 rc = -ENOMEM;
455 goto fail_free_dmabuf; 455 goto fail_free_dmabuf;
456 } 456 }
457
457 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 458 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
458 if (!mboxq) { 459 if (!mboxq) {
459 rc = -ENOMEM; 460 rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6585{ 6586{
6586 struct lpfc_vport *vport; 6587 struct lpfc_vport *vport;
6587 unsigned long flags; 6588 unsigned long flags;
6589 int i;
6590
6591 /* The physical ports are always vpi 0 - translate is unnecessary. */
6592 if (vpi > 0) {
6593 /*
6594 * Translate the physical vpi to the logical vpi. The
6595 * vport stores the logical vpi.
6596 */
6597 for (i = 0; i < phba->max_vpi; i++) {
6598 if (vpi == phba->vpi_ids[i])
6599 break;
6600 }
6601
6602 if (i >= phba->max_vpi) {
6603 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
6604 "2936 Could not find Vport mapped "
6605 "to vpi %d\n", vpi);
6606 return NULL;
6607 }
6608 }
6588 6609
6589 spin_lock_irqsave(&phba->hbalock, flags); 6610 spin_lock_irqsave(&phba->hbalock, flags);
6590 list_for_each_entry(vport, &phba->port_list, listentry) { 6611 list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6641 vport = phba->pport; 6662 vport = phba->pport;
6642 else 6663 else
6643 vport = lpfc_find_vport_by_vpid(phba, 6664 vport = lpfc_find_vport_by_vpid(phba,
6644 icmd->unsli3.rcvsli3.vpi - phba->vpi_base); 6665 icmd->unsli3.rcvsli3.vpi);
6645 } 6666 }
6667
6646 /* If there are no BDEs associated 6668 /* If there are no BDEs associated
6647 * with this IOCB, there is nothing to do. 6669 * with this IOCB, there is nothing to do.
6648 */ 6670 */
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7222 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7244 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
7223 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; 7245 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
7224 /* Set the ulpContext to the vpi */ 7246 /* Set the ulpContext to the vpi */
7225 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base; 7247 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
7226 } else { 7248 } else {
7227 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 7249 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7228 icmd->ulpCt_h = 1; 7250 icmd->ulpCt_h = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2653c844d20d..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
881 /* Clean up any firmware default rpi's */ 881 /* Clean up any firmware default rpi's */
882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
883 if (mb) { 883 if (mb) {
884 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); 884 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
885 mb->vport = vport; 885 mb->vport = vport;
886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -3421,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3421 return; 3421 return;
3422 } 3422 }
3423 3423
3424 ndlp->nlp_rpi = mb->un.varWords[0]; 3424 if (phba->sli_rev < LPFC_SLI_REV4)
3425 ndlp->nlp_rpi = mb->un.varWords[0];
3425 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3426 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3426 ndlp->nlp_type |= NLP_FABRIC; 3427 ndlp->nlp_type |= NLP_FABRIC;
3427 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3428 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3495,7 +3496,8 @@ out:
3495 return; 3496 return;
3496 } 3497 }
3497 3498
3498 ndlp->nlp_rpi = mb->un.varWords[0]; 3499 if (phba->sli_rev < LPFC_SLI_REV4)
3500 ndlp->nlp_rpi = mb->un.varWords[0];
3499 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3501 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3500 ndlp->nlp_type |= NLP_FABRIC; 3502 ndlp->nlp_type |= NLP_FABRIC;
3501 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3503 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3582,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3582 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 3584 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3583 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 3585 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3584 3586
3585
3586 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3587 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3587 fc_remote_port_rolechg(rport, rport_ids.roles); 3588 fc_remote_port_rolechg(rport, rport_ids.roles);
3588 3589
@@ -4097,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4097 struct lpfc_hba *phba = vport->phba; 4098 struct lpfc_hba *phba = vport->phba;
4098 LPFC_MBOXQ_t *mbox; 4099 LPFC_MBOXQ_t *mbox;
4099 int rc; 4100 int rc;
4101 uint16_t rpi;
4100 4102
4101 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4103 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4102 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4104 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4103 if (mbox) { 4105 if (mbox) {
4104 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 4106 /* SLI4 ports require the physical rpi value. */
4107 rpi = ndlp->nlp_rpi;
4108 if (phba->sli_rev == LPFC_SLI_REV4)
4109 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4110 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4105 mbox->vport = vport; 4111 mbox->vport = vport;
4106 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4112 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4107 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4170,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4170 4176
4171 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4177 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4172 if (mbox) { 4178 if (mbox) {
4173 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 4179 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4180 mbox);
4174 mbox->vport = vport; 4181 mbox->vport = vport;
4175 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4176 mbox->context1 = NULL; 4183 mbox->context1 = NULL;
@@ -4194,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4194 4201
4195 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4202 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4196 if (mbox) { 4203 if (mbox) {
4197 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); 4204 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4205 mbox);
4198 mbox->vport = vport; 4206 mbox->vport = vport;
4199 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4207 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4200 mbox->context1 = NULL; 4208 mbox->context1 = NULL;
@@ -4644,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
4644 if (num_sent) 4652 if (num_sent)
4645 return; 4653 return;
4646 4654
4647 /* 4655 /* Register the VPI for SLI3, NON-NPIV only. */
4648 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
4649 * continue discovery.
4650 */
4651 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4656 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4652 !(vport->fc_flag & FC_PT2PT) && 4657 !(vport->fc_flag & FC_PT2PT) &&
4653 !(vport->fc_flag & FC_RSCN_MODE) && 4658 !(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4934,7 +4939,7 @@ restart_disc:
4934 if (phba->sli_rev < LPFC_SLI_REV4) { 4939 if (phba->sli_rev < LPFC_SLI_REV4) {
4935 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 4940 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
4936 lpfc_issue_reg_vpi(phba, vport); 4941 lpfc_issue_reg_vpi(phba, vport);
4937 else { /* NPIV Not enabled */ 4942 else {
4938 lpfc_issue_clear_la(phba, vport); 4943 lpfc_issue_clear_la(phba, vport);
4939 vport->port_state = LPFC_VPORT_READY; 4944 vport->port_state = LPFC_VPORT_READY;
4940 } 4945 }
@@ -5060,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5060 pmb->context1 = NULL; 5065 pmb->context1 = NULL;
5061 pmb->context2 = NULL; 5066 pmb->context2 = NULL;
5062 5067
5063 ndlp->nlp_rpi = mb->un.varWords[0]; 5068 if (phba->sli_rev < LPFC_SLI_REV4)
5069 ndlp->nlp_rpi = mb->un.varWords[0];
5064 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 5070 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5065 ndlp->nlp_type |= NLP_FABRIC; 5071 ndlp->nlp_type |= NLP_FABRIC;
5066 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5072 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index bb3af9fabd7e..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
64#define SLI3_IOCB_CMD_SIZE 128 64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
68#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
67 69
68/* vendor ID used in SCSI netlink calls */ 70/* vendor ID used in SCSI netlink calls */
69#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) 71#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 61a40fd1ad18..f14db2d17f29 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -229,9 +229,26 @@ struct ulp_bde64 {
229 229
230struct lpfc_sli4_flags { 230struct lpfc_sli4_flags {
231 uint32_t word0; 231 uint32_t word0;
232#define lpfc_fip_flag_SHIFT 0 232#define lpfc_idx_rsrc_rdy_SHIFT 0
233#define lpfc_fip_flag_MASK 0x00000001 233#define lpfc_idx_rsrc_rdy_MASK 0x00000001
234#define lpfc_fip_flag_WORD word0 234#define lpfc_idx_rsrc_rdy_WORD word0
235#define LPFC_IDX_RSRC_RDY 1
236#define lpfc_xri_rsrc_rdy_SHIFT 1
237#define lpfc_xri_rsrc_rdy_MASK 0x00000001
238#define lpfc_xri_rsrc_rdy_WORD word0
239#define LPFC_XRI_RSRC_RDY 1
240#define lpfc_rpi_rsrc_rdy_SHIFT 2
241#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
242#define lpfc_rpi_rsrc_rdy_WORD word0
243#define LPFC_RPI_RSRC_RDY 1
244#define lpfc_vpi_rsrc_rdy_SHIFT 3
245#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
246#define lpfc_vpi_rsrc_rdy_WORD word0
247#define LPFC_VPI_RSRC_RDY 1
248#define lpfc_vfi_rsrc_rdy_SHIFT 4
249#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
250#define lpfc_vfi_rsrc_rdy_WORD word0
251#define LPFC_VFI_RSRC_RDY 1
235}; 252};
236 253
237struct sli4_bls_rsp { 254struct sli4_bls_rsp {
@@ -791,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
791 } response; 808 } response;
792}; 809};
793 810
794/* Mailbox structures */ 811/* Mailbox Header structures.
812 * struct mbox_header is defined for first generation SLI4_CFG mailbox
813 * calls deployed for BE-based ports.
814 *
815 * struct sli4_mbox_header is defined for second generation SLI4
816 * ports that don't deploy the SLI4_CFG mechanism.
817 */
795struct mbox_header { 818struct mbox_header {
796 struct lpfc_sli4_cfg_mhdr cfg_mhdr; 819 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
797 union lpfc_sli4_cfg_shdr cfg_shdr; 820 union lpfc_sli4_cfg_shdr cfg_shdr;
798}; 821};
799 822
823#define LPFC_EXTENT_LOCAL 0
824#define LPFC_TIMEOUT_DEFAULT 0
825#define LPFC_EXTENT_VERSION_DEFAULT 0
826
800/* Subsystem Definitions */ 827/* Subsystem Definitions */
801#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 828#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
802#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC 829#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -819,6 +846,10 @@ struct mbox_header {
819#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 846#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
820#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 847#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
821#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 848#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
849#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
850#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
851#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
852#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
822#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 853#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
823#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 854#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
824#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC 855#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
@@ -1238,6 +1269,110 @@ struct lpfc_mbx_mq_destroy {
1238 } u; 1269 } u;
1239}; 1270};
1240 1271
1272/* Start Gen 2 SLI4 Mailbox definitions: */
1273
1274/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
1275#define LPFC_RSC_TYPE_FCOE_VFI 0x20
1276#define LPFC_RSC_TYPE_FCOE_VPI 0x21
1277#define LPFC_RSC_TYPE_FCOE_RPI 0x22
1278#define LPFC_RSC_TYPE_FCOE_XRI 0x23
1279
1280struct lpfc_mbx_get_rsrc_extent_info {
1281 struct mbox_header header;
1282 union {
1283 struct {
1284 uint32_t word4;
1285#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
1286#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
1287#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
1288 } req;
1289 struct {
1290 uint32_t word4;
1291#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
1292#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
1293#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
1294#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
1295#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
1296#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
1297 } rsp;
1298 } u;
1299};
1300
1301struct lpfc_id_range {
1302 uint32_t word5;
1303#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
1304#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
1305#define lpfc_mbx_rsrc_id_word4_0_WORD word5
1306#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
1307#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
1308#define lpfc_mbx_rsrc_id_word4_1_WORD word5
1309};
1310
1311/*
1312 * struct lpfc_mbx_alloc_rsrc_extents:
1313 * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
1314 * 6 words of header + 4 words of shared subcommand header +
1315 * 1 words of Extent-Opcode-specific header = 11 words or 44 bytes total.
1316 *
1317 * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
1318 * for extents payload.
1319 *
1320 * 212/2 (bytes per extent) = 106 extents.
1321 * 106/2 (extents per word) = 53 words.
1322 * lpfc_id_range id is statically size to 53.
1323 *
1324 * This mailbox definition is used for ALLOC or GET_ALLOCATED
1325 * extent ranges. For ALLOC, the type and cnt are required.
1326 * For GET_ALLOCATED, only the type is required.
1327 */
1328struct lpfc_mbx_alloc_rsrc_extents {
1329 struct mbox_header header;
1330 union {
1331 struct {
1332 uint32_t word4;
1333#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
1334#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
1335#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
1336#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
1337#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
1338#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
1339 } req;
1340 struct {
1341 uint32_t word4;
1342#define lpfc_mbx_rsrc_cnt_SHIFT 0
1343#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
1344#define lpfc_mbx_rsrc_cnt_WORD word4
1345 struct lpfc_id_range id[53];
1346 } rsp;
1347 } u;
1348};
1349
1350/*
1351 * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
1352 * structure shares the same SHIFT/MASK/WORD defines provided in the
1353 * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in
1354 * the structures defined above. This non-embedded structure provides for the
1355 * maximum number of extents supported by the port.
1356 */
1357struct lpfc_mbx_nembed_rsrc_extent {
1358 union lpfc_sli4_cfg_shdr cfg_shdr;
1359 uint32_t word4;
1360 struct lpfc_id_range id;
1361};
1362
1363struct lpfc_mbx_dealloc_rsrc_extents {
1364 struct mbox_header header;
1365 struct {
1366 uint32_t word4;
1367#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
1368#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
1369#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
1370 } req;
1371
1372};
1373
1374/* Start SLI4 FCoE specific mbox structures. */
1375
1241struct lpfc_mbx_post_hdr_tmpl { 1376struct lpfc_mbx_post_hdr_tmpl {
1242 struct mbox_header header; 1377 struct mbox_header header;
1243 uint32_t word10; 1378 uint32_t word10;
@@ -1801,61 +1936,31 @@ struct lpfc_mbx_read_rev {
1801 1936
1802struct lpfc_mbx_read_config { 1937struct lpfc_mbx_read_config {
1803 uint32_t word1; 1938 uint32_t word1;
1804#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 1939#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
1805#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF 1940#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
1806#define lpfc_mbx_rd_conf_max_bbc_WORD word1 1941#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
1807#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1808#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1809#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1810 uint32_t word2; 1942 uint32_t word2;
1811#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1812#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1813#define lpfc_mbx_rd_conf_nport_did_WORD word2
1814#define lpfc_mbx_rd_conf_topology_SHIFT 24 1943#define lpfc_mbx_rd_conf_topology_SHIFT 24
1815#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF 1944#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1816#define lpfc_mbx_rd_conf_topology_WORD word2 1945#define lpfc_mbx_rd_conf_topology_WORD word2
1817 uint32_t word3; 1946 uint32_t rsvd_3;
1818#define lpfc_mbx_rd_conf_ao_SHIFT 0
1819#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1820#define lpfc_mbx_rd_conf_ao_WORD word3
1821#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1822#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1823#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1824#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1825#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1826#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1827#define lpfc_mbx_rd_conf_mc_SHIFT 29
1828#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1829#define lpfc_mbx_rd_conf_mc_WORD word3
1830 uint32_t word4; 1947 uint32_t word4;
1831#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 1948#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1832#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF 1949#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1833#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 1950#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1834 uint32_t word5; 1951 uint32_t rsvd_5;
1835#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1836#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1837#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1838 uint32_t word6; 1952 uint32_t word6;
1839#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 1953#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1840#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF 1954#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1841#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 1955#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1842 uint32_t word7; 1956 uint32_t rsvd_7;
1843#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 1957 uint32_t rsvd_8;
1844#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1845#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1846 uint32_t word8;
1847#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1848#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1849#define lpfc_mbx_rd_conf_al_tov_WORD word8
1850 uint32_t word9; 1958 uint32_t word9;
1851#define lpfc_mbx_rd_conf_lmt_SHIFT 0 1959#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1852#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF 1960#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1853#define lpfc_mbx_rd_conf_lmt_WORD word9 1961#define lpfc_mbx_rd_conf_lmt_WORD word9
1854 uint32_t word10; 1962 uint32_t rsvd_10;
1855#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 1963 uint32_t rsvd_11;
1856#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1857#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1858 uint32_t word11_rsvd;
1859 uint32_t word12; 1964 uint32_t word12;
1860#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 1965#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1861#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF 1966#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1885,9 +1990,6 @@ struct lpfc_mbx_read_config {
1885#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF 1990#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1886#define lpfc_mbx_rd_conf_vfi_count_WORD word15 1991#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1887 uint32_t word16; 1992 uint32_t word16;
1888#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1889#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1890#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1891#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 1993#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1892#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF 1994#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1893#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 1995#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2197,6 +2299,12 @@ struct lpfc_sli4_parameters {
2197#define cfg_fcoe_SHIFT 0 2299#define cfg_fcoe_SHIFT 0
2198#define cfg_fcoe_MASK 0x00000001 2300#define cfg_fcoe_MASK 0x00000001
2199#define cfg_fcoe_WORD word12 2301#define cfg_fcoe_WORD word12
2302#define cfg_ext_SHIFT 1
2303#define cfg_ext_MASK 0x00000001
2304#define cfg_ext_WORD word12
2305#define cfg_hdrr_SHIFT 2
2306#define cfg_hdrr_MASK 0x00000001
2307#define cfg_hdrr_WORD word12
2200#define cfg_phwq_SHIFT 15 2308#define cfg_phwq_SHIFT 15
2201#define cfg_phwq_MASK 0x00000001 2309#define cfg_phwq_MASK 0x00000001
2202#define cfg_phwq_WORD word12 2310#define cfg_phwq_WORD word12
@@ -2431,6 +2539,9 @@ struct lpfc_mqe {
2431 struct lpfc_mbx_cq_destroy cq_destroy; 2539 struct lpfc_mbx_cq_destroy cq_destroy;
2432 struct lpfc_mbx_wq_destroy wq_destroy; 2540 struct lpfc_mbx_wq_destroy wq_destroy;
2433 struct lpfc_mbx_rq_destroy rq_destroy; 2541 struct lpfc_mbx_rq_destroy rq_destroy;
2542 struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
2543 struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
2544 struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
2434 struct lpfc_mbx_post_sgl_pages post_sgl_pages; 2545 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
2435 struct lpfc_mbx_nembed_cmd nembed_cmd; 2546 struct lpfc_mbx_nembed_cmd nembed_cmd;
2436 struct lpfc_mbx_read_rev read_rev; 2547 struct lpfc_mbx_read_rev read_rev;
@@ -2651,7 +2762,7 @@ struct lpfc_bmbx_create {
2651#define SGL_ALIGN_SZ 64 2762#define SGL_ALIGN_SZ 64
2652#define SGL_PAGE_SIZE 4096 2763#define SGL_PAGE_SIZE 4096
2653/* align SGL addr on a size boundary - adjust address up */ 2764/* align SGL addr on a size boundary - adjust address up */
2654#define NO_XRI ((uint16_t)-1) 2765#define NO_XRI 0xffff
2655 2766
2656struct wqe_common { 2767struct wqe_common {
2657 uint32_t word6; 2768 uint32_t word6;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b535cff4b2a..09632ea689e9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -212,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
213 if (!lpfc_vpd_data) 213 if (!lpfc_vpd_data)
214 goto out_free_mbox; 214 goto out_free_mbox;
215
216 do { 215 do {
217 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
218 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -603,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
603 /* Clear all pending interrupts */ 602 /* Clear all pending interrupts */
604 writel(0xffffffff, phba->HAregaddr); 603 writel(0xffffffff, phba->HAregaddr);
605 readl(phba->HAregaddr); /* flush */ 604 readl(phba->HAregaddr); /* flush */
606
607 phba->link_state = LPFC_HBA_ERROR; 605 phba->link_state = LPFC_HBA_ERROR;
608 if (rc != MBX_BUSY) 606 if (rc != MBX_BUSY)
609 mempool_free(pmb, phba->mbox_mem_pool); 607 mempool_free(pmb, phba->mbox_mem_pool);
@@ -2690,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2690 kfree(io); 2688 kfree(io);
2691 phba->total_iocbq_bufs--; 2689 phba->total_iocbq_bufs--;
2692 } 2690 }
2691
2693 spin_unlock_irq(&phba->hbalock); 2692 spin_unlock_irq(&phba->hbalock);
2694 return 0; 2693 return 0;
2695} 2694}
@@ -3646,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3646 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3645 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3647 "2718 Clear Virtual Link Received for VPI 0x%x" 3646 "2718 Clear Virtual Link Received for VPI 0x%x"
3648 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648
3649 vport = lpfc_find_vport_by_vpid(phba, 3649 vport = lpfc_find_vport_by_vpid(phba,
3650 acqe_fip->index - phba->vpi_base); 3650 acqe_fip->index - phba->vpi_base);
3651 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3651 ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -4319,7 +4319,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4319 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4319 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4320 4320
4321 /* 4321 /*
4322 * Initialize dirver internal slow-path work queues 4322 * Initialize driver internal slow-path work queues
4323 */ 4323 */
4324 4324
4325 /* Driver internel slow-path CQ Event pool */ 4325 /* Driver internel slow-path CQ Event pool */
@@ -4335,6 +4335,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4335 /* Receive queue CQ Event work queue list */ 4335 /* Receive queue CQ Event work queue list */
4336 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4336 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4337 4337
4338 /* Initialize extent block lists. */
4339 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4340 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4341 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4342 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4343
4338 /* Initialize the driver internal SLI layer lists. */ 4344 /* Initialize the driver internal SLI layer lists. */
4339 lpfc_sli_setup(phba); 4345 lpfc_sli_setup(phba);
4340 lpfc_sli_queue_setup(phba); 4346 lpfc_sli_queue_setup(phba);
@@ -4409,9 +4415,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4409 } 4415 }
4410 /* 4416 /*
4411 * Get sli4 parameters that override parameters from Port capabilities. 4417 * Get sli4 parameters that override parameters from Port capabilities.
4412 * If this call fails it is not a critical error so continue loading. 4418 * If this call fails, it isn't critical unless the SLI4 parameters come
4419 * back in conflict.
4413 */ 4420 */
4414 lpfc_get_sli4_parameters(phba, mboxq); 4421 rc = lpfc_get_sli4_parameters(phba, mboxq);
4422 if (rc) {
4423 if (phba->sli4_hba.extents_in_use &&
4424 phba->sli4_hba.rpi_hdrs_in_use) {
4425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4426 "2999 Unsupported SLI4 Parameters "
4427 "Extents and RPI headers enabled.\n");
4428 goto out_free_bsmbx;
4429 }
4430 }
4415 mempool_free(mboxq, phba->mbox_mem_pool); 4431 mempool_free(mboxq, phba->mbox_mem_pool);
4416 /* Create all the SLI4 queues */ 4432 /* Create all the SLI4 queues */
4417 rc = lpfc_sli4_queue_create(phba); 4433 rc = lpfc_sli4_queue_create(phba);
@@ -4436,7 +4452,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4436 "1430 Failed to initialize sgl list.\n"); 4452 "1430 Failed to initialize sgl list.\n");
4437 goto out_free_sgl_list; 4453 goto out_free_sgl_list;
4438 } 4454 }
4439
4440 rc = lpfc_sli4_init_rpi_hdrs(phba); 4455 rc = lpfc_sli4_init_rpi_hdrs(phba);
4441 if (rc) { 4456 if (rc) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4457 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4555,6 +4570,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4555 lpfc_sli4_cq_event_release_all(phba); 4570 lpfc_sli4_cq_event_release_all(phba);
4556 lpfc_sli4_cq_event_pool_destroy(phba); 4571 lpfc_sli4_cq_event_pool_destroy(phba);
4557 4572
4573 /* Release resource identifiers. */
4574 lpfc_sli4_dealloc_resource_identifiers(phba);
4575
4558 /* Free the bsmbx region. */ 4576 /* Free the bsmbx region. */
4559 lpfc_destroy_bootstrap_mbox(phba); 4577 lpfc_destroy_bootstrap_mbox(phba);
4560 4578
@@ -4755,6 +4773,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4755 "Unloading driver.\n", __func__); 4773 "Unloading driver.\n", __func__);
4756 goto out_free_iocbq; 4774 goto out_free_iocbq;
4757 } 4775 }
4776 iocbq_entry->sli4_lxritag = NO_XRI;
4758 iocbq_entry->sli4_xritag = NO_XRI; 4777 iocbq_entry->sli4_xritag = NO_XRI;
4759 4778
4760 spin_lock_irq(&phba->hbalock); 4779 spin_lock_irq(&phba->hbalock);
@@ -4852,7 +4871,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4852 4871
4853 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4872 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4854 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4873 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4855 "2400 lpfc_init_sgl_list els %d.\n", 4874 "2400 ELS XRI count %d.\n",
4856 els_xri_cnt); 4875 els_xri_cnt);
4857 /* Initialize and populate the sglq list per host/VF. */ 4876 /* Initialize and populate the sglq list per host/VF. */
4858 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4877 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4885,7 +4904,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4885 phba->sli4_hba.scsi_xri_max = 4904 phba->sli4_hba.scsi_xri_max =
4886 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4905 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4887 phba->sli4_hba.scsi_xri_cnt = 0; 4906 phba->sli4_hba.scsi_xri_cnt = 0;
4888
4889 phba->sli4_hba.lpfc_scsi_psb_array = 4907 phba->sli4_hba.lpfc_scsi_psb_array =
4890 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4908 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4891 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4909 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4908,13 +4926,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4908 goto out_free_mem; 4926 goto out_free_mem;
4909 } 4927 }
4910 4928
4911 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4912 if (sglq_entry->sli4_xritag == NO_XRI) {
4913 kfree(sglq_entry);
4914 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4915 "Unloading driver.\n", __func__);
4916 goto out_free_mem;
4917 }
4918 sglq_entry->buff_type = GEN_BUFF_TYPE; 4929 sglq_entry->buff_type = GEN_BUFF_TYPE;
4919 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4930 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4920 if (sglq_entry->virt == NULL) { 4931 if (sglq_entry->virt == NULL) {
@@ -4963,24 +4974,20 @@ int
4963lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4974lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4964{ 4975{
4965 int rc = 0; 4976 int rc = 0;
4966 int longs;
4967 uint16_t rpi_count;
4968 struct lpfc_rpi_hdr *rpi_hdr; 4977 struct lpfc_rpi_hdr *rpi_hdr;
4969 4978
4970 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4979 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4971
4972 /* 4980 /*
4973 * Provision an rpi bitmask range for discovery. The total count 4981 * If the SLI4 port supports extents, posting the rpi header isn't
4974 * is the difference between max and base + 1. 4982 * required. Set the expected maximum count and let the actual value
4983 * get set when extents are fully allocated.
4975 */ 4984 */
4976 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4985 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4977 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4986 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
4978 4987 return rc;
4979 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4988 }
4980 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4989 if (phba->sli4_hba.extents_in_use)
4981 GFP_KERNEL); 4990 return -EIO;
4982 if (!phba->sli4_hba.rpi_bmask)
4983 return -ENOMEM;
4984 4991
4985 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 4992 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4986 if (!rpi_hdr) { 4993 if (!rpi_hdr) {
@@ -5014,11 +5021,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5014 struct lpfc_rpi_hdr *rpi_hdr; 5021 struct lpfc_rpi_hdr *rpi_hdr;
5015 uint32_t rpi_count; 5022 uint32_t rpi_count;
5016 5023
5024 /*
5025 * If the SLI4 port supports extents, posting the rpi header isn't
5026 * required. Set the expected maximum count and let the actual value
5027 * get set when extents are fully allocated.
5028 */
5029 if (!phba->sli4_hba.rpi_hdrs_in_use)
5030 return NULL;
5031 if (phba->sli4_hba.extents_in_use)
5032 return NULL;
5033
5034 /* The limit on the logical index is just the max_rpi count. */
5017 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5035 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5018 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5036 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5019 5037
5020 spin_lock_irq(&phba->hbalock); 5038 spin_lock_irq(&phba->hbalock);
5021 curr_rpi_range = phba->sli4_hba.next_rpi; 5039 /*
5040 * Establish the starting RPI in this header block. The starting
5041 * rpi is normalized to a zero base because the physical rpi is
5042 * port based.
5043 */
5044 curr_rpi_range = phba->sli4_hba.next_rpi -
5045 phba->sli4_hba.max_cfg_param.rpi_base;
5022 spin_unlock_irq(&phba->hbalock); 5046 spin_unlock_irq(&phba->hbalock);
5023 5047
5024 /* 5048 /*
@@ -5031,6 +5055,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5031 else 5055 else
5032 rpi_count = LPFC_RPI_HDR_COUNT; 5056 rpi_count = LPFC_RPI_HDR_COUNT;
5033 5057
5058 if (!rpi_count)
5059 return NULL;
5034 /* 5060 /*
5035 * First allocate the protocol header region for the port. The 5061 * First allocate the protocol header region for the port. The
5036 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5062 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -5063,12 +5089,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5063 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5089 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5064 rpi_hdr->page_count = 1; 5090 rpi_hdr->page_count = 1;
5065 spin_lock_irq(&phba->hbalock); 5091 spin_lock_irq(&phba->hbalock);
5066 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 5092
5093 /* The rpi_hdr stores the logical index only. */
5094 rpi_hdr->start_rpi = curr_rpi_range;
5067 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5095 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5068 5096
5069 /* 5097 /*
5070 * The next_rpi stores the next module-64 rpi value to post 5098 * The next_rpi stores the next logical module-64 rpi value used
5071 * in any subsequent rpi memory region postings. 5099 * to post physical rpis in subsequent rpi postings.
5072 */ 5100 */
5073 phba->sli4_hba.next_rpi += rpi_count; 5101 phba->sli4_hba.next_rpi += rpi_count;
5074 spin_unlock_irq(&phba->hbalock); 5102 spin_unlock_irq(&phba->hbalock);
@@ -5087,15 +5115,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5087 * @phba: pointer to lpfc hba data structure. 5115 * @phba: pointer to lpfc hba data structure.
5088 * 5116 *
5089 * This routine is invoked to remove all memory resources allocated 5117 * This routine is invoked to remove all memory resources allocated
5090 * to support rpis. This routine presumes the caller has released all 5118 * to support rpis for SLI4 ports not supporting extents. This routine
5091 * rpis consumed by fabric or port logins and is prepared to have 5119 * presumes the caller has released all rpis consumed by fabric or port
5092 * the header pages removed. 5120 * logins and is prepared to have the header pages removed.
5093 **/ 5121 **/
5094void 5122void
5095lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5123lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5096{ 5124{
5097 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5125 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5098 5126
5127 if (!phba->sli4_hba.rpi_hdrs_in_use)
5128 goto exit;
5129
5099 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5130 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5100 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5131 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5101 list_del(&rpi_hdr->list); 5132 list_del(&rpi_hdr->list);
@@ -5104,7 +5135,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5104 kfree(rpi_hdr->dmabuf); 5135 kfree(rpi_hdr->dmabuf);
5105 kfree(rpi_hdr); 5136 kfree(rpi_hdr);
5106 } 5137 }
5107 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5138 exit:
5139 /* There are no rpis available to the port now. */
5140 phba->sli4_hba.next_rpi = 0;
5108} 5141}
5109 5142
5110/** 5143/**
@@ -5873,6 +5906,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5873 rc = -EIO; 5906 rc = -EIO;
5874 } else { 5907 } else {
5875 rd_config = &pmb->u.mqe.un.rd_config; 5908 rd_config = &pmb->u.mqe.un.rd_config;
5909 phba->sli4_hba.extents_in_use =
5910 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5876 phba->sli4_hba.max_cfg_param.max_xri = 5911 phba->sli4_hba.max_cfg_param.max_xri =
5877 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5912 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5878 phba->sli4_hba.max_cfg_param.xri_base = 5913 phba->sli4_hba.max_cfg_param.xri_base =
@@ -5891,8 +5926,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5891 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5926 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5892 phba->sli4_hba.max_cfg_param.max_fcfi = 5927 phba->sli4_hba.max_cfg_param.max_fcfi =
5893 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5928 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5894 phba->sli4_hba.max_cfg_param.fcfi_base =
5895 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5896 phba->sli4_hba.max_cfg_param.max_eq = 5929 phba->sli4_hba.max_cfg_param.max_eq =
5897 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5930 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5898 phba->sli4_hba.max_cfg_param.max_rq = 5931 phba->sli4_hba.max_cfg_param.max_rq =
@@ -5910,11 +5943,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5910 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5943 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5911 phba->max_vports = phba->max_vpi; 5944 phba->max_vports = phba->max_vpi;
5912 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5945 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5913 "2003 cfg params XRI(B:%d M:%d), " 5946 "2003 cfg params Extents? %d "
5947 "XRI(B:%d M:%d), "
5914 "VPI(B:%d M:%d) " 5948 "VPI(B:%d M:%d) "
5915 "VFI(B:%d M:%d) " 5949 "VFI(B:%d M:%d) "
5916 "RPI(B:%d M:%d) " 5950 "RPI(B:%d M:%d) "
5917 "FCFI(B:%d M:%d)\n", 5951 "FCFI(Count:%d)\n",
5952 phba->sli4_hba.extents_in_use,
5918 phba->sli4_hba.max_cfg_param.xri_base, 5953 phba->sli4_hba.max_cfg_param.xri_base,
5919 phba->sli4_hba.max_cfg_param.max_xri, 5954 phba->sli4_hba.max_cfg_param.max_xri,
5920 phba->sli4_hba.max_cfg_param.vpi_base, 5955 phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5923,7 +5958,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5923 phba->sli4_hba.max_cfg_param.max_vfi, 5958 phba->sli4_hba.max_cfg_param.max_vfi,
5924 phba->sli4_hba.max_cfg_param.rpi_base, 5959 phba->sli4_hba.max_cfg_param.rpi_base,
5925 phba->sli4_hba.max_cfg_param.max_rpi, 5960 phba->sli4_hba.max_cfg_param.max_rpi,
5926 phba->sli4_hba.max_cfg_param.fcfi_base,
5927 phba->sli4_hba.max_cfg_param.max_fcfi); 5961 phba->sli4_hba.max_cfg_param.max_fcfi);
5928 } 5962 }
5929 5963
@@ -8104,6 +8138,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8104 int length; 8138 int length;
8105 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8139 struct lpfc_sli4_parameters *mbx_sli4_parameters;
8106 8140
8141 /*
8142 * By default, the driver assumes the SLI4 port requires RPI
8143 * header postings. The SLI4_PARAM response will correct this
8144 * assumption.
8145 */
8146 phba->sli4_hba.rpi_hdrs_in_use = 1;
8147
8107 /* Read the port's SLI4 Config Parameters */ 8148 /* Read the port's SLI4 Config Parameters */
8108 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 8149 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8109 sizeof(struct lpfc_sli4_cfg_mhdr)); 8150 sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -8140,6 +8181,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8140 mbx_sli4_parameters); 8181 mbx_sli4_parameters);
8141 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 8182 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8142 mbx_sli4_parameters); 8183 mbx_sli4_parameters);
8184 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8185 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8143 8186
8144 /* Make sure that sge_supp_len can be handled by the driver */ 8187 /* Make sure that sge_supp_len can be handled by the driver */
8145 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 8188 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 mb->un.varRdSparm.vpi = vpi + phba->vpi_base; 613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
614 615
615 /* save address for completion */ 616 /* save address for completion */
616 pmb->context1 = mp; 617 pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
643 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
644 645
645 mb->un.varUnregDID.did = did; 646 mb->un.varUnregDID.did = did;
646 if (vpi != 0xffff)
647 vpi += phba->vpi_base;
648 mb->un.varUnregDID.vpi = vpi; 647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
649 651
650 mb->mbxCommand = MBX_UNREG_D_ID; 652 mb->mbxCommand = MBX_UNREG_D_ID;
651 mb->mbxOwner = OWN_HOST; 653 mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
738 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
739 741
740 mb->un.varRegLogin.rpi = 0; 742 mb->un.varRegLogin.rpi = 0;
741 if (phba->sli_rev == LPFC_SLI_REV4) { 743 if (phba->sli_rev == LPFC_SLI_REV4)
742 mb->un.varRegLogin.rpi = rpi; 744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
743 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) 745 if (phba->sli_rev >= LPFC_SLI_REV3)
744 return 1; 746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
745 }
746 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
747 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
748 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
749 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759 "rpi x%x\n", vpi, did, rpi); 759 "rpi x%x\n", vpi, did, rpi);
760 return (1); 760 return 1;
761 } 761 }
762 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
763 sparam = mp->virt; 763 sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 775
776 return (0); 776 return 0;
777} 777}
778 778
779/** 779/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
789 * 789 *
790 * This routine prepares the mailbox command for unregistering remote port 790 * This routine prepares the mailbox command for unregistering remote port
791 * login. 791 * login.
792 *
793 * For SLI4 ports, the rpi passed to this function must be the physical
794 * rpi value, not the logical index.
792 **/ 795 **/
793void 796void
794lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 797lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
799 mb = &pmb->u.mb; 802 mb = &pmb->u.mb;
800 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
801 804
802 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 805 mb->un.varUnregLogin.rpi = rpi;
803 mb->un.varUnregLogin.rsvd1 = 0; 806 mb->un.varUnregLogin.rsvd1 = 0;
804 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; 807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
805 809
806 mb->mbxCommand = MBX_UNREG_LOGIN; 810 mb->mbxCommand = MBX_UNREG_LOGIN;
807 mb->mbxOwner = OWN_HOST; 811 mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
825 829
826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
827 if (mbox) { 831 if (mbox) {
828 lpfc_unreg_login(phba, vport->vpi, 832 /*
829 vport->vpi + phba->vpi_base, mbox); 833 * For SLI4 functions, the rpi field is overloaded for
830 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; 834 * the vport context unreg all. This routine passes
835 * 0 for the rpi field in lpfc_unreg_login for compatibility
836 * with SLI3 and then overrides the rpi field with the
837 * expected value for SLI4.
838 */
839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840 mbox);
841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
831 mbox->vport = vport; 842 mbox->vport = vport;
832 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
833 mbox->context1 = NULL; 844 mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
865 if ((phba->sli_rev == LPFC_SLI_REV4) && 876 if ((phba->sli_rev == LPFC_SLI_REV4) &&
866 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) 877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
867 mb->un.varRegVpi.upd = 1; 878 mb->un.varRegVpi.upd = 1;
868 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 879
880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
869 mb->un.varRegVpi.sid = vport->fc_myDID; 881 mb->un.varRegVpi.sid = vport->fc_myDID;
870 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 882 if (phba->sli_rev == LPFC_SLI_REV4)
883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884 else
885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
871 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, 886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
872 sizeof(struct lpfc_name)); 887 sizeof(struct lpfc_name));
873 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); 888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
901 MAILBOX_t *mb = &pmb->u.mb; 916 MAILBOX_t *mb = &pmb->u.mb;
902 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
903 918
904 if (phba->sli_rev < LPFC_SLI_REV4) 919 if (phba->sli_rev == LPFC_SLI_REV3)
905 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
906 else 921 else if (phba->sli_rev >= LPFC_SLI_REV4)
907 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base; 922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
908 923
909 mb->mbxCommand = MBX_UNREG_VPI; 924 mb->mbxCommand = MBX_UNREG_VPI;
910 mb->mbxOwner = OWN_HOST; 925 mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1735 return length; 1750 return length;
1736 } 1751 }
1737 1752
1738 /* Setup for the none-embedded mbox command */ 1753 /* Setup for the non-embedded mbox command */
1739 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; 1754 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1755 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1756 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1742 /* Allocate record for keeping SGE virtual addresses */ 1757 /* Allocate record for keeping SGE virtual addresses */
1743 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1758 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1744 GFP_KERNEL); 1759 GFP_KERNEL);
1745 if (!mbox->sge_array) { 1760 if (!mbox->sge_array) {
1746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1790 /* The sub-header is in DMA memory, which needs endian converstion */ 1805 /* The sub-header is in DMA memory, which needs endian converstion */
1791 if (cfg_shdr) 1806 if (cfg_shdr)
1792 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1807 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1793 sizeof(union lpfc_sli4_cfg_shdr)); 1808 sizeof(union lpfc_sli4_cfg_shdr));
1794
1795 return alloc_len; 1809 return alloc_len;
1796} 1810}
1797 1811
1798/** 1812/**
1813 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
1814 * @phba: pointer to lpfc hba data structure.
1815 * @mbox: pointer to an allocated lpfc mbox resource.
1816 * @exts_count: the number of extents, if required, to allocate.
1817 * @rsrc_type: the resource extent type.
1818 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
1819 *
1820 * This routine completes the subcommand header for SLI4 resource extent
1821 * mailbox commands. It is called after lpfc_sli4_config. The caller must
1822 * pass an allocated mailbox and the attributes required to initialize the
1823 * mailbox correctly.
1824 *
1825 * Return: the actual length of the mbox command allocated.
1826 **/
1827int
1828lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1829 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1830{
1831 uint8_t opcode = 0;
1832 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1833 void *virtaddr = NULL;
1834
1835 /* Set up SLI4 ioctl command header fields */
1836 if (emb == LPFC_SLI4_MBX_NEMBED) {
1837 /* Get the first SGE entry from the non-embedded DMA memory */
1838 virtaddr = mbox->sge_array->addr[0];
1839 if (virtaddr == NULL)
1840 return 1;
1841 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1842 }
1843
1844 /*
1845 * The resource type is common to all extent Opcodes and resides in the
1846 * same position.
1847 */
1848 if (emb == LPFC_SLI4_MBX_EMBED)
1849 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1850 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1851 rsrc_type);
1852 else {
1853 /* This is DMA data. Byteswap is required. */
1854 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1855 n_rsrc_extnt, rsrc_type);
1856 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1857 &n_rsrc_extnt->word4,
1858 sizeof(uint32_t));
1859 }
1860
1861 /* Complete the initialization for the particular Opcode. */
1862 opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
1863 switch (opcode) {
1864 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1865 if (emb == LPFC_SLI4_MBX_EMBED)
1866 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1867 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1868 exts_count);
1869 else
1870 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1871 n_rsrc_extnt, exts_count);
1872 break;
1873 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1874 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1875 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1876 /* Initialization is complete.*/
1877 break;
1878 default:
1879 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1880 "2929 Resource Extent Opcode x%x is "
1881 "unsupported\n", opcode);
1882 return 1;
1883 }
1884
1885 return 0;
1886}
1887
1888/**
1799 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command 1889 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1800 * @phba: pointer to lpfc hba data structure. 1890 * @phba: pointer to lpfc hba data structure.
1801 * @mbox: pointer to lpfc mbox command. 1891 * @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1939 bf_set(lpfc_init_vfi_vr, init_vfi, 1); 2029 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1940 bf_set(lpfc_init_vfi_vt, init_vfi, 1); 2030 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1941 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
1942 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
1943 bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1944 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); 2034 bf_set(lpfc_init_vpi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi);
1945} 2038}
1946 2039
1947/** 2040/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1964 reg_vfi = &mbox->u.mqe.un.reg_vfi; 2057 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1965 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); 2058 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1966 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); 2059 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1967 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 2060 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2061 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1968 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 2062 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1969 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 2063 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
1970 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 2064 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1971 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 2065 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1972 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2066 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1997 memset(mbox, 0, sizeof(*mbox)); 2091 memset(mbox, 0, sizeof(*mbox));
1998 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); 2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1999 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, 2093 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2000 vpi + phba->vpi_base); 2094 phba->vpi_ids[vpi]);
2001 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, 2095 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2002 phba->pport->vfi + phba->vfi_base); 2096 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2003} 2097}
2004 2098
2005/** 2099/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2019 memset(mbox, 0, sizeof(*mbox)); 2113 memset(mbox, 0, sizeof(*mbox));
2020 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 2114 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2021 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, 2115 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2022 vport->vfi + vport->phba->vfi_base); 2116 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2023} 2117}
2024 2118
2025/** 2119/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2131void 2225void
2132lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) 2226lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2133{ 2227{
2228 struct lpfc_hba *phba = ndlp->phba;
2134 struct lpfc_mbx_resume_rpi *resume_rpi; 2229 struct lpfc_mbx_resume_rpi *resume_rpi;
2135 2230
2136 memset(mbox, 0, sizeof(*mbox)); 2231 memset(mbox, 0, sizeof(*mbox));
2137 resume_rpi = &mbox->u.mqe.un.resume_rpi; 2232 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2138 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 2233 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2139 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi); 2234 bf_set(lpfc_resume_rpi_index, resume_rpi,
2235 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2140 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2236 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2141 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2237 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2142} 2238}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
62lpfc_mem_alloc(struct lpfc_hba *phba, int align) 62lpfc_mem_alloc(struct lpfc_hba *phba, int align)
63{ 63{
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int longs;
66 int i; 65 int i;
67 66
68 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
138 phba->lpfc_hrb_pool = NULL; 137 phba->lpfc_hrb_pool = NULL;
139 phba->lpfc_drb_pool = NULL; 138 phba->lpfc_drb_pool = NULL;
140 } 139 }
141 /* vpi zero is reserved for the physical port so add 1 to max */
142 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
143 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
144 if (!phba->vpi_bmask)
145 goto fail_free_dbq_pool;
146 140
147 return 0; 141 return 0;
148
149 fail_free_dbq_pool:
150 pci_pool_destroy(phba->lpfc_drb_pool);
151 phba->lpfc_drb_pool = NULL;
152 fail_free_hrb_pool: 142 fail_free_hrb_pool:
153 pci_pool_destroy(phba->lpfc_hrb_pool); 143 pci_pool_destroy(phba->lpfc_hrb_pool);
154 phba->lpfc_hrb_pool = NULL; 144 phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
191 int i; 181 int i;
192 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 182 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
193 183
194 /* Free VPI bitmask memory */
195 kfree(phba->vpi_bmask);
196
197 /* Free HBQ pools */ 184 /* Free HBQ pools */
198 lpfc_sli_hbqbuf_free_all(phba); 185 lpfc_sli_hbqbuf_free_all(phba);
199 if (phba->lpfc_drb_pool) 186 if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 9bf7eb85d172..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -652,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
652 lpfc_unreg_rpi(vport, ndlp); 652 lpfc_unreg_rpi(vport, ndlp);
653 return 0; 653 return 0;
654} 654}
655
655/** 656/**
656 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. 657 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
657 * @phba : Pointer to lpfc_hba structure. 658 * @phba : Pointer to lpfc_hba structure.
@@ -1394,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1394 if (mb->mbxStatus) { 1395 if (mb->mbxStatus) {
1395 /* RegLogin failed */ 1396 /* RegLogin failed */
1396 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1397 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1397 "0246 RegLogin failed Data: x%x x%x x%x\n", 1398 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1398 did, mb->mbxStatus, vport->port_state); 1399 "x%x\n",
1400 did, mb->mbxStatus, vport->port_state,
1401 mb->un.varRegLogin.vpi,
1402 mb->un.varRegLogin.rpi);
1399 /* 1403 /*
1400 * If RegLogin failed due to lack of HBA resources do not 1404 * If RegLogin failed due to lack of HBA resources do not
1401 * retry discovery. 1405 * retry discovery.
@@ -1419,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1419 return ndlp->nlp_state; 1423 return ndlp->nlp_state;
1420 } 1424 }
1421 1425
1422 ndlp->nlp_rpi = mb->un.varWords[0]; 1426 /* SLI4 ports have preallocated logical rpis. */
1427 if (vport->phba->sli_rev < LPFC_SLI_REV4)
1428 ndlp->nlp_rpi = mb->un.varWords[0];
1429
1423 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1430 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1424 1431
1425 /* Only if we are not a fabric nport do we issue PRLI */ 1432 /* Only if we are not a fabric nport do we issue PRLI */
@@ -2020,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2020 MAILBOX_t *mb = &pmb->u.mb; 2027 MAILBOX_t *mb = &pmb->u.mb;
2021 2028
2022 if (!mb->mbxStatus) { 2029 if (!mb->mbxStatus) {
2023 ndlp->nlp_rpi = mb->un.varWords[0]; 2030 /* SLI4 ports have preallocated logical rpis. */
2031 if (vport->phba->sli_rev < LPFC_SLI_REV4)
2032 ndlp->nlp_rpi = mb->un.varWords[0];
2024 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 2033 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2025 } else { 2034 } else {
2026 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 2035 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bc8359b038c4..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
743 if (bcnt == 0) 743 if (bcnt == 0)
744 continue; 744 continue;
745 /* Now, post the SCSI buffer list sgls as a block */ 745 /* Now, post the SCSI buffer list sgls as a block */
746 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 746 if (!phba->sli4_hba.extents_in_use)
747 status = lpfc_sli4_post_scsi_sgl_block(phba,
748 &sblist,
749 bcnt);
750 else
751 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
752 &sblist,
753 bcnt);
747 /* Reset SCSI buffer count for next round of posting */ 754 /* Reset SCSI buffer count for next round of posting */
748 bcnt = 0; 755 bcnt = 0;
749 while (!list_empty(&sblist)) { 756 while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
787 dma_addr_t pdma_phys_fcp_cmd; 794 dma_addr_t pdma_phys_fcp_cmd;
788 dma_addr_t pdma_phys_fcp_rsp; 795 dma_addr_t pdma_phys_fcp_rsp;
789 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 796 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
790 uint16_t iotag, last_xritag = NO_XRI; 797 uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
791 int status = 0, index; 798 int status = 0, index;
792 int bcnt; 799 int bcnt;
793 int non_sequential_xri = 0; 800 int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
823 break; 830 break;
824 } 831 }
825 832
826 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); 833 lxri = lpfc_sli4_next_xritag(phba);
827 if (psb->cur_iocbq.sli4_xritag == NO_XRI) { 834 if (lxri == NO_XRI) {
828 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 835 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
829 psb->data, psb->dma_handle); 836 psb->data, psb->dma_handle);
830 kfree(psb); 837 kfree(psb);
831 break; 838 break;
832 } 839 }
840 psb->cur_iocbq.sli4_lxritag = lxri;
841 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
833 if (last_xritag != NO_XRI 842 if (last_xritag != NO_XRI
834 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { 843 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
835 non_sequential_xri = 1; 844 non_sequential_xri = 1;
@@ -916,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
916 } 925 }
917 } 926 }
918 if (bcnt) { 927 if (bcnt) {
919 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 928 if (!phba->sli4_hba.extents_in_use)
929 status = lpfc_sli4_post_scsi_sgl_block(phba,
930 &sblist,
931 bcnt);
932 else
933 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
934 &sblist,
935 bcnt);
936
937 if (status) {
938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
939 "3021 SCSI SGL post error %d\n",
940 status);
941 bcnt = 0;
942 }
920 /* Reset SCSI buffer count for next round of posting */ 943 /* Reset SCSI buffer count for next round of posting */
921 while (!list_empty(&sblist)) { 944 while (!list_empty(&sblist)) {
922 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 945 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2797,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2797 * of the scsi_cmnd request_buffer 2820 * of the scsi_cmnd request_buffer
2798 */ 2821 */
2799 piocbq->iocb.ulpContext = pnode->nlp_rpi; 2822 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2823 if (phba->sli_rev == LPFC_SLI_REV4)
2824 piocbq->iocb.ulpContext =
2825 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
2800 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 2826 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2801 piocbq->iocb.ulpFCP2Rcvy = 1; 2827 piocbq->iocb.ulpFCP2Rcvy = 1;
2802 else 2828 else
@@ -2810,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2810} 2836}
2811 2837
2812/** 2838/**
2813 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit 2839 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2814 * @vport: The virtual port for which this call is being executed. 2840 * @vport: The virtual port for which this call is being executed.
2815 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2841 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2816 * @lun: Logical unit number. 2842 * @lun: Logical unit number.
@@ -2854,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2854 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2880 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2855 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2881 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2856 piocb->ulpContext = ndlp->nlp_rpi; 2882 piocb->ulpContext = ndlp->nlp_rpi;
2883 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
2884 piocb->ulpContext =
2885 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
2886 }
2857 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 2887 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2858 piocb->ulpFCP2Rcvy = 1; 2888 piocb->ulpFCP2Rcvy = 1;
2859 } 2889 }
@@ -3408,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3408 3438
3409 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3439 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3410 "0702 Issue %s to TGT %d LUN %d " 3440 "0702 Issue %s to TGT %d LUN %d "
3411 "rpi x%x nlp_flag x%x\n", 3441 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
3412 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3442 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3413 pnode->nlp_rpi, pnode->nlp_flag); 3443 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
3444 iocbq->iocb_flag);
3414 3445
3415 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3446 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3416 iocbq, iocbqrsp, lpfc_cmd->timeout); 3447 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3422,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3422 ret = FAILED; 3453 ret = FAILED;
3423 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3454 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3424 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3455 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3425 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", 3456 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
3457 "iocb_flag x%x\n",
3426 lpfc_taskmgmt_name(task_mgmt_cmd), 3458 lpfc_taskmgmt_name(task_mgmt_cmd),
3427 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3459 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3428 iocbqrsp->iocb.un.ulpWord[4]); 3460 iocbqrsp->iocb.un.ulpWord[4],
3461 iocbq->iocb_flag);
3429 } else if (status == IOCB_BUSY) 3462 } else if (status == IOCB_BUSY)
3430 ret = FAILED; 3463 ret = FAILED;
3431 else 3464 else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fcfa8c8cfb67..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -459,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
459 struct lpfc_iocbq * iocbq = NULL; 459 struct lpfc_iocbq * iocbq = NULL;
460 460
461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
462
463 if (iocbq) 462 if (iocbq)
464 phba->iocb_cnt++; 463 phba->iocb_cnt++;
465 if (phba->iocb_cnt > phba->iocb_max) 464 if (phba->iocb_cnt > phba->iocb_max)
@@ -482,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
482static struct lpfc_sglq * 481static struct lpfc_sglq *
483__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 482__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
484{ 483{
485 uint16_t adj_xri;
486 struct lpfc_sglq *sglq; 484 struct lpfc_sglq *sglq;
487 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 485
488 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 486 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
489 return NULL; 487 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
490 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
491 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
492 return sglq; 488 return sglq;
493} 489}
494 490
@@ -507,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
507struct lpfc_sglq * 503struct lpfc_sglq *
508__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 504__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
509{ 505{
510 uint16_t adj_xri;
511 struct lpfc_sglq *sglq; 506 struct lpfc_sglq *sglq;
512 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 507
513 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 508 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
514 return NULL;
515 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
516 return sglq; 509 return sglq;
517} 510}
518 511
@@ -535,7 +528,6 @@ static int
535__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 528__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
536 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 529 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
537{ 530{
538 uint16_t adj_xri;
539 struct lpfc_node_rrq *rrq; 531 struct lpfc_node_rrq *rrq;
540 int empty; 532 int empty;
541 uint32_t did = 0; 533 uint32_t did = 0;
@@ -556,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
556 /* 548 /*
557 * set the active bit even if there is no mem available. 549 * set the active bit even if there is no mem available.
558 */ 550 */
559 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
560
561 if (NLP_CHK_FREE_REQ(ndlp)) 551 if (NLP_CHK_FREE_REQ(ndlp))
562 goto out; 552 goto out;
563 553
564 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 554 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
565 goto out; 555 goto out;
566 556
567 if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 557 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
568 goto out; 558 goto out;
569 559
570 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
571 if (rrq) { 561 if (rrq) {
572 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
573 rrq->xritag = xritag; 563 rrq->xritag = phba->sli4_hba.xri_ids[xritag];
574 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
575 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
576 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -606,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
606 uint16_t xritag, 596 uint16_t xritag,
607 struct lpfc_node_rrq *rrq) 597 struct lpfc_node_rrq *rrq)
608{ 598{
609 uint16_t adj_xri;
610 struct lpfc_nodelist *ndlp = NULL; 599 struct lpfc_nodelist *ndlp = NULL;
611 600
612 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 601 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -622,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
622 if (!ndlp) 611 if (!ndlp)
623 goto out; 612 goto out;
624 613
625 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 614 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
626 if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
627 rrq->send_rrq = 0; 615 rrq->send_rrq = 0;
628 rrq->xritag = 0; 616 rrq->xritag = 0;
629 rrq->rrq_stop_time = 0; 617 rrq->rrq_stop_time = 0;
@@ -799,12 +787,9 @@ int
799lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 787lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
800 uint16_t xritag) 788 uint16_t xritag)
801{ 789{
802 uint16_t adj_xri;
803
804 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
805 if (!ndlp) 790 if (!ndlp)
806 return 0; 791 return 0;
807 if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 792 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
808 return 1; 793 return 1;
809 else 794 else
810 return 0; 795 return 0;
@@ -844,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
844 * @piocb: Pointer to the iocbq. 829 * @piocb: Pointer to the iocbq.
845 * 830 *
846 * This function is called with hbalock held. This function 831 * This function is called with hbalock held. This function
847 * Gets a new driver sglq object from the sglq list. If the 832 * gets a new driver sglq object from the sglq list. If the
848 * list is not empty then it is successful, it returns pointer to the newly 833 * list is not empty then it is successful, it returns pointer to the newly
849 * allocated sglq object else it returns NULL. 834 * allocated sglq object else it returns NULL.
850 **/ 835 **/
@@ -854,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
854 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 839 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
855 struct lpfc_sglq *sglq = NULL; 840 struct lpfc_sglq *sglq = NULL;
856 struct lpfc_sglq *start_sglq = NULL; 841 struct lpfc_sglq *start_sglq = NULL;
857 uint16_t adj_xri;
858 struct lpfc_scsi_buf *lpfc_cmd; 842 struct lpfc_scsi_buf *lpfc_cmd;
859 struct lpfc_nodelist *ndlp; 843 struct lpfc_nodelist *ndlp;
860 int found = 0; 844 int found = 0;
@@ -873,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
873 while (!found) { 857 while (!found) {
874 if (!sglq) 858 if (!sglq)
875 return NULL; 859 return NULL;
876 adj_xri = sglq->sli4_xritag -
877 phba->sli4_hba.max_cfg_param.xri_base;
878 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { 860 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
879 /* This xri has an rrq outstanding for this DID. 861 /* This xri has an rrq outstanding for this DID.
880 * put it back in the list and get another xri. 862 * put it back in the list and get another xri.
@@ -891,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
891 } 873 }
892 sglq->ndlp = ndlp; 874 sglq->ndlp = ndlp;
893 found = 1; 875 found = 1;
894 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 876 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
895 sglq->state = SGL_ALLOCATED; 877 sglq->state = SGL_ALLOCATED;
896 } 878 }
897 return sglq; 879 return sglq;
@@ -947,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
947 if (iocbq->sli4_xritag == NO_XRI) 929 if (iocbq->sli4_xritag == NO_XRI)
948 sglq = NULL; 930 sglq = NULL;
949 else 931 else
950 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 932 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
933
951 if (sglq) { 934 if (sglq) {
952 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 935 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
953 (sglq->state != SGL_XRI_ABORTED)) { 936 (sglq->state != SGL_XRI_ABORTED)) {
@@ -974,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
974 * Clean all volatile data fields, preserve iotag and node struct. 957 * Clean all volatile data fields, preserve iotag and node struct.
975 */ 958 */
976 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 959 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
960 iocbq->sli4_lxritag = NO_XRI;
977 iocbq->sli4_xritag = NO_XRI; 961 iocbq->sli4_xritag = NO_XRI;
978 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 962 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
979} 963}
@@ -2116,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2116 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2117 !pmb->u.mb.mbxStatus) { 2101 !pmb->u.mb.mbxStatus) {
2118 rpi = pmb->u.mb.un.varWords[0]; 2102 rpi = pmb->u.mb.un.varWords[0];
2119 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; 2103 vpi = pmb->u.mb.un.varRegLogin.vpi;
2120 lpfc_unreg_login(phba, vpi, rpi, pmb); 2104 lpfc_unreg_login(phba, vpi, rpi, pmb);
2121 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2122 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -4323,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4323 continue; 4307 continue;
4324 } else if (rc) 4308 } else if (rc)
4325 break; 4309 break;
4310
4326 phba->link_state = LPFC_INIT_MBX_CMDS; 4311 phba->link_state = LPFC_INIT_MBX_CMDS;
4327 lpfc_config_port(phba, pmb); 4312 lpfc_config_port(phba, pmb);
4328 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4313 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4426,7 +4411,8 @@ int
4426lpfc_sli_hba_setup(struct lpfc_hba *phba) 4411lpfc_sli_hba_setup(struct lpfc_hba *phba)
4427{ 4412{
4428 uint32_t rc; 4413 uint32_t rc;
4429 int mode = 3; 4414 int mode = 3, i;
4415 int longs;
4430 4416
4431 switch (lpfc_sli_mode) { 4417 switch (lpfc_sli_mode) {
4432 case 2: 4418 case 2:
@@ -4496,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4496 if (rc) 4482 if (rc)
4497 goto lpfc_sli_hba_setup_error; 4483 goto lpfc_sli_hba_setup_error;
4498 4484
4485 /* Initialize VPIs. */
4486 if (phba->sli_rev == LPFC_SLI_REV3) {
4487 /*
4488 * The VPI bitmask and physical ID array are allocated
4489 * and initialized once only - at driver load. A port
4490 * reset doesn't need to reinitialize this memory.
4491 */
4492 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4493 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4494 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4495 GFP_KERNEL);
4496 if (!phba->vpi_bmask) {
4497 rc = -ENOMEM;
4498 goto lpfc_sli_hba_setup_error;
4499 }
4500
4501 phba->vpi_ids = kzalloc(
4502 (phba->max_vpi+1) * sizeof(uint16_t),
4503 GFP_KERNEL);
4504 if (!phba->vpi_ids) {
4505 kfree(phba->vpi_bmask);
4506 rc = -ENOMEM;
4507 goto lpfc_sli_hba_setup_error;
4508 }
4509 for (i = 0; i < phba->max_vpi; i++)
4510 phba->vpi_ids[i] = i;
4511 }
4512 }
4513
4499 /* Init HBQs */ 4514 /* Init HBQs */
4500 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4515 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4501 rc = lpfc_sli_hbq_setup(phba); 4516 rc = lpfc_sli_hbq_setup(phba);
@@ -4694,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4694} 4709}
4695 4710
4696/** 4711/**
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type.
4715 *
4716 * This function allocates all SLI4 resource identifiers.
4717 **/
4718static int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size)
4721{
4722 int rc = 0;
4723 uint32_t length;
4724 uint32_t mbox_tmo;
4725 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4726 LPFC_MBOXQ_t *mbox;
4727
4728 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4729 if (!mbox)
4730 return -ENOMEM;
4731
4732 /* Find out how many extents are available for this resource type */
4733 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4734 sizeof(struct lpfc_sli4_cfg_mhdr));
4735 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4736 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4737 length, LPFC_SLI4_MBX_EMBED);
4738
4739 /* Send an extents count of 0 - the GET doesn't use it. */
4740 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4741 LPFC_SLI4_MBX_EMBED);
4742 if (unlikely(rc)) {
4743 rc = -EIO;
4744 goto err_exit;
4745 }
4746
4747 if (!phba->sli4_hba.intr_enable)
4748 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4749 else {
4750 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4751 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4752 }
4753 if (unlikely(rc)) {
4754 rc = -EIO;
4755 goto err_exit;
4756 }
4757
4758 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4759 if (bf_get(lpfc_mbox_hdr_status,
4760 &rsrc_info->header.cfg_shdr.response)) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4762 "2930 Failed to get resource extents "
4763 "Status 0x%x Add'l Status 0x%x\n",
4764 bf_get(lpfc_mbox_hdr_status,
4765 &rsrc_info->header.cfg_shdr.response),
4766 bf_get(lpfc_mbox_hdr_add_status,
4767 &rsrc_info->header.cfg_shdr.response));
4768 rc = -EIO;
4769 goto err_exit;
4770 }
4771
4772 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4773 &rsrc_info->u.rsp);
4774 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4775 &rsrc_info->u.rsp);
4776 err_exit:
4777 mempool_free(mbox, phba->mbox_mem_pool);
4778 return rc;
4779}
4780
4781/**
4782 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4783 * @phba: Pointer to HBA context object.
4784 * @type: The extent type to check.
4785 *
4786 * This function reads the current available extents from the port and checks
4787 * if the extent count or extent size has changed since the last access.
4788 * Callers use this routine post port reset to understand if there is a
4789 * extent reprovisioning requirement.
4790 *
4791 * Returns:
4792 * -Error: error indicates problem.
4793 * 1: Extent count or size has changed.
4794 * 0: No changes.
4795 **/
4796static int
4797lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4798{
4799 uint16_t curr_ext_cnt, rsrc_ext_cnt;
4800 uint16_t size_diff, rsrc_ext_size;
4801 int rc = 0;
4802 struct lpfc_rsrc_blks *rsrc_entry;
4803 struct list_head *rsrc_blk_list = NULL;
4804
4805 size_diff = 0;
4806 curr_ext_cnt = 0;
4807 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4808 &rsrc_ext_cnt,
4809 &rsrc_ext_size);
4810 if (unlikely(rc))
4811 return -EIO;
4812
4813 switch (type) {
4814 case LPFC_RSC_TYPE_FCOE_RPI:
4815 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4816 break;
4817 case LPFC_RSC_TYPE_FCOE_VPI:
4818 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
4819 break;
4820 case LPFC_RSC_TYPE_FCOE_XRI:
4821 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
4822 break;
4823 case LPFC_RSC_TYPE_FCOE_VFI:
4824 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
4825 break;
4826 default:
4827 break;
4828 }
4829
4830 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
4831 curr_ext_cnt++;
4832 if (rsrc_entry->rsrc_size != rsrc_ext_size)
4833 size_diff++;
4834 }
4835
4836 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
4837 rc = 1;
4838
4839 return rc;
4840}
4841
4842/**
4843 * lpfc_sli4_cfg_post_extnts -
4844 * @phba: Pointer to HBA context object.
4845 * @extnt_cnt - number of available extents.
4846 * @type - the extent type (rpi, xri, vfi, vpi).
4847 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
4848 * @mbox - pointer to the caller's allocated mailbox structure.
4849 *
4850 * This function executes the extents allocation request. It also
4851 * takes care of the amount of memory needed to allocate or get the
4852 * allocated extents. It is the caller's responsibility to evaluate
4853 * the response.
4854 *
4855 * Returns:
4856 * -Error: Error value describes the condition found.
4857 * 0: if successful
4858 **/
4859static int
4860lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4861 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
4862{
4863 int rc = 0;
4864 uint32_t req_len;
4865 uint32_t emb_len;
4866 uint32_t alloc_len, mbox_tmo;
4867
4868 /* Calculate the total requested length of the dma memory */
4869 req_len = *extnt_cnt * sizeof(uint16_t);
4870
4871 /*
4872 * Calculate the size of an embedded mailbox. The uint32_t
4873 * accounts for extents-specific word.
4874 */
4875 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
4876 sizeof(uint32_t);
4877
4878 /*
4879 * Presume the allocation and response will fit into an embedded
4880 * mailbox. If not true, reconfigure to a non-embedded mailbox.
4881 */
4882 *emb = LPFC_SLI4_MBX_EMBED;
4883 if (req_len > emb_len) {
4884 req_len = *extnt_cnt * sizeof(uint16_t) +
4885 sizeof(union lpfc_sli4_cfg_shdr) +
4886 sizeof(uint32_t);
4887 *emb = LPFC_SLI4_MBX_NEMBED;
4888 }
4889
4890 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4891 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
4892 req_len, *emb);
4893 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM;
4899 }
4900 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
4901 if (unlikely(rc))
4902 return -EIO;
4903
4904 if (!phba->sli4_hba.intr_enable)
4905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4906 else {
4907 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4908 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4909 }
4910
4911 if (unlikely(rc))
4912 rc = -EIO;
4913 return rc;
4914}
4915
4916/**
4917 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
4918 * @phba: Pointer to HBA context object.
4919 * @type: The resource extent type to allocate.
4920 *
4921 * This function allocates the number of elements for the specified
4922 * resource type.
4923 **/
4924static int
4925lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
4926{
4927 bool emb = false;
4928 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
4929 uint16_t rsrc_id, rsrc_start, j, k;
4930 uint16_t *ids;
4931 int i, rc;
4932 unsigned long longs;
4933 unsigned long *bmask;
4934 struct lpfc_rsrc_blks *rsrc_blks;
4935 LPFC_MBOXQ_t *mbox;
4936 uint32_t length;
4937 struct lpfc_id_range *id_array = NULL;
4938 void *virtaddr = NULL;
4939 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
4940 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
4941 struct list_head *ext_blk_list;
4942
4943 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4944 &rsrc_cnt,
4945 &rsrc_size);
4946 if (unlikely(rc))
4947 return -EIO;
4948
4949 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
4950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4951 "3009 No available Resource Extents "
4952 "for resource type 0x%x: Count: 0x%x, "
4953 "Size 0x%x\n", type, rsrc_cnt,
4954 rsrc_size);
4955 return -ENOMEM;
4956 }
4957
4958 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
4959 "2903 Available Resource Extents "
4960 "for resource type 0x%x: Count: 0x%x, "
4961 "Size 0x%x\n", type, rsrc_cnt,
4962 rsrc_size);
4963
4964 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4965 if (!mbox)
4966 return -ENOMEM;
4967
4968 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
4969 if (unlikely(rc)) {
4970 rc = -EIO;
4971 goto err_exit;
4972 }
4973
4974 /*
4975 * Figure out where the response is located. Then get local pointers
4976 * to the response data. The port does not guarantee to respond to
4977 * all extents counts request so update the local variable with the
4978 * allocated count from the port.
4979 */
4980 if (emb == LPFC_SLI4_MBX_EMBED) {
4981 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
4982 id_array = &rsrc_ext->u.rsp.id[0];
4983 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
4984 } else {
4985 virtaddr = mbox->sge_array->addr[0];
4986 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
4987 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
4988 id_array = &n_rsrc->id;
4989 }
4990
4991 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4992 rsrc_id_cnt = rsrc_cnt * rsrc_size;
4993
4994 /*
4995 * Based on the resource size and count, correct the base and max
4996 * resource values.
4997 */
4998 length = sizeof(struct lpfc_rsrc_blks);
4999 switch (type) {
5000 case LPFC_RSC_TYPE_FCOE_RPI:
5001 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5002 sizeof(unsigned long),
5003 GFP_KERNEL);
5004 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5005 rc = -ENOMEM;
5006 goto err_exit;
5007 }
5008 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5009 sizeof(uint16_t),
5010 GFP_KERNEL);
5011 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5012 kfree(phba->sli4_hba.rpi_bmask);
5013 rc = -ENOMEM;
5014 goto err_exit;
5015 }
5016
5017 /*
5018 * The next_rpi was initialized with the maximum available
5019 * count but the port may allocate a smaller number. Catch
5020 * that case and update the next_rpi.
5021 */
5022 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5023
5024 /* Initialize local ptrs for common extent processing later. */
5025 bmask = phba->sli4_hba.rpi_bmask;
5026 ids = phba->sli4_hba.rpi_ids;
5027 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5028 break;
5029 case LPFC_RSC_TYPE_FCOE_VPI:
5030 phba->vpi_bmask = kzalloc(longs *
5031 sizeof(unsigned long),
5032 GFP_KERNEL);
5033 if (unlikely(!phba->vpi_bmask)) {
5034 rc = -ENOMEM;
5035 goto err_exit;
5036 }
5037 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5038 sizeof(uint16_t),
5039 GFP_KERNEL);
5040 if (unlikely(!phba->vpi_ids)) {
5041 kfree(phba->vpi_bmask);
5042 rc = -ENOMEM;
5043 goto err_exit;
5044 }
5045
5046 /* Initialize local ptrs for common extent processing later. */
5047 bmask = phba->vpi_bmask;
5048 ids = phba->vpi_ids;
5049 ext_blk_list = &phba->lpfc_vpi_blk_list;
5050 break;
5051 case LPFC_RSC_TYPE_FCOE_XRI:
5052 phba->sli4_hba.xri_bmask = kzalloc(longs *
5053 sizeof(unsigned long),
5054 GFP_KERNEL);
5055 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5056 rc = -ENOMEM;
5057 goto err_exit;
5058 }
5059 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5060 sizeof(uint16_t),
5061 GFP_KERNEL);
5062 if (unlikely(!phba->sli4_hba.xri_ids)) {
5063 kfree(phba->sli4_hba.xri_bmask);
5064 rc = -ENOMEM;
5065 goto err_exit;
5066 }
5067
5068 /* Initialize local ptrs for common extent processing later. */
5069 bmask = phba->sli4_hba.xri_bmask;
5070 ids = phba->sli4_hba.xri_ids;
5071 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5072 break;
5073 case LPFC_RSC_TYPE_FCOE_VFI:
5074 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5075 sizeof(unsigned long),
5076 GFP_KERNEL);
5077 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5078 rc = -ENOMEM;
5079 goto err_exit;
5080 }
5081 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5082 sizeof(uint16_t),
5083 GFP_KERNEL);
5084 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5085 kfree(phba->sli4_hba.vfi_bmask);
5086 rc = -ENOMEM;
5087 goto err_exit;
5088 }
5089
5090 /* Initialize local ptrs for common extent processing later. */
5091 bmask = phba->sli4_hba.vfi_bmask;
5092 ids = phba->sli4_hba.vfi_ids;
5093 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5094 break;
5095 default:
5096 /* Unsupported Opcode. Fail call. */
5097 id_array = NULL;
5098 bmask = NULL;
5099 ids = NULL;
5100 ext_blk_list = NULL;
5101 goto err_exit;
5102 }
5103
5104 /*
5105 * Complete initializing the extent configuration with the
5106 * allocated ids assigned to this function. The bitmask serves
5107 * as an index into the array and manages the available ids. The
5108 * array just stores the ids communicated to the port via the wqes.
5109 */
5110 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5111 if ((i % 2) == 0)
5112 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5113 &id_array[k]);
5114 else
5115 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5116 &id_array[k]);
5117
5118 rsrc_blks = kzalloc(length, GFP_KERNEL);
5119 if (unlikely(!rsrc_blks)) {
5120 rc = -ENOMEM;
5121 kfree(bmask);
5122 kfree(ids);
5123 goto err_exit;
5124 }
5125 rsrc_blks->rsrc_start = rsrc_id;
5126 rsrc_blks->rsrc_size = rsrc_size;
5127 list_add_tail(&rsrc_blks->list, ext_blk_list);
5128 rsrc_start = rsrc_id;
5129 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5130 phba->sli4_hba.scsi_xri_start = rsrc_start +
5131 lpfc_sli4_get_els_iocb_cnt(phba);
5132
5133 while (rsrc_id < (rsrc_start + rsrc_size)) {
5134 ids[j] = rsrc_id;
5135 rsrc_id++;
5136 j++;
5137 }
5138 /* Entire word processed. Get next word.*/
5139 if ((i % 2) == 1)
5140 k++;
5141 }
5142 err_exit:
5143 lpfc_sli4_mbox_cmd_free(phba, mbox);
5144 return rc;
5145}
5146
5147/**
5148 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5149 * @phba: Pointer to HBA context object.
5150 * @type: the extent's type.
5151 *
5152 * This function deallocates all extents of a particular resource type.
5153 * SLI4 does not allow for deallocating a particular extent range. It
5154 * is the caller's responsibility to release all kernel memory resources.
5155 **/
5156static int
5157lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5158{
5159 int rc;
5160 uint32_t length, mbox_tmo = 0;
5161 LPFC_MBOXQ_t *mbox;
5162 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5163 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5164
5165 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5166 if (!mbox)
5167 return -ENOMEM;
5168
5169 /*
5170 * This function sends an embedded mailbox because it only sends the
5171 * the resource type. All extents of this type are released by the
5172 * port.
5173 */
5174 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5175 sizeof(struct lpfc_sli4_cfg_mhdr));
5176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5177 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5178 length, LPFC_SLI4_MBX_EMBED);
5179
5180 /* Send an extents count of 0 - the dealloc doesn't use it. */
5181 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5182 LPFC_SLI4_MBX_EMBED);
5183 if (unlikely(rc)) {
5184 rc = -EIO;
5185 goto out_free_mbox;
5186 }
5187 if (!phba->sli4_hba.intr_enable)
5188 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5189 else {
5190 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo);
5191 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5192 }
5193 if (unlikely(rc)) {
5194 rc = -EIO;
5195 goto out_free_mbox;
5196 }
5197
5198 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5199 if (bf_get(lpfc_mbox_hdr_status,
5200 &dealloc_rsrc->header.cfg_shdr.response)) {
5201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5202 "2919 Failed to release resource extents "
5203 "for type %d - Status 0x%x Add'l Status 0x%x. "
5204 "Resource memory not released.\n",
5205 type,
5206 bf_get(lpfc_mbox_hdr_status,
5207 &dealloc_rsrc->header.cfg_shdr.response),
5208 bf_get(lpfc_mbox_hdr_add_status,
5209 &dealloc_rsrc->header.cfg_shdr.response));
5210 rc = -EIO;
5211 goto out_free_mbox;
5212 }
5213
5214 /* Release kernel memory resources for the specific type. */
5215 switch (type) {
5216 case LPFC_RSC_TYPE_FCOE_VPI:
5217 kfree(phba->vpi_bmask);
5218 kfree(phba->vpi_ids);
5219 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5220 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5221 &phba->lpfc_vpi_blk_list, list) {
5222 list_del_init(&rsrc_blk->list);
5223 kfree(rsrc_blk);
5224 }
5225 break;
5226 case LPFC_RSC_TYPE_FCOE_XRI:
5227 kfree(phba->sli4_hba.xri_bmask);
5228 kfree(phba->sli4_hba.xri_ids);
5229 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5230 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5231 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5232 list_del_init(&rsrc_blk->list);
5233 kfree(rsrc_blk);
5234 }
5235 break;
5236 case LPFC_RSC_TYPE_FCOE_VFI:
5237 kfree(phba->sli4_hba.vfi_bmask);
5238 kfree(phba->sli4_hba.vfi_ids);
5239 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5240 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5241 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5242 list_del_init(&rsrc_blk->list);
5243 kfree(rsrc_blk);
5244 }
5245 break;
5246 case LPFC_RSC_TYPE_FCOE_RPI:
5247 /* RPI bitmask and physical id array are cleaned up earlier. */
5248 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5249 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5250 list_del_init(&rsrc_blk->list);
5251 kfree(rsrc_blk);
5252 }
5253 break;
5254 default:
5255 break;
5256 }
5257
5258 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5259
5260 out_free_mbox:
5261 mempool_free(mbox, phba->mbox_mem_pool);
5262 return rc;
5263}
5264
5265/**
5266 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5267 * @phba: Pointer to HBA context object.
5268 *
5269 * This function allocates all SLI4 resource identifiers.
5270 **/
 5271int
 5272lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 5273{
 5274	int i, rc, error = 0;
 5275	uint16_t count, base;
 5276	unsigned long longs;
 5277
	/*
	 * Two provisioning modes follow: extent-based ports must query and
	 * allocate extents from the port; all other ports derive contiguous
	 * id ranges directly from the READ_CONFIG maximums.
	 */
 5278	if (phba->sli4_hba.extents_in_use) {
 5279		/*
 5280		 * The port supports resource extents. The XRI, VPI, VFI, RPI
 5281		 * resource extent count must be read and allocated before
 5282		 * provisioning the resource id arrays.
 5283		 */
 5284		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
 5285		    LPFC_IDX_RSRC_RDY) {
 5286			/*
 5287			 * Extent-based resources are set - the driver could
 5288			 * be in a port reset. Figure out if any corrective
 5289			 * actions need to be taken.
 5290			 */
 5291			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
 5292						 LPFC_RSC_TYPE_FCOE_VFI);
 5293			if (rc != 0)
 5294				error++;
 5295			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
 5296						 LPFC_RSC_TYPE_FCOE_VPI);
 5297			if (rc != 0)
 5298				error++;
 5299			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
 5300						 LPFC_RSC_TYPE_FCOE_XRI);
 5301			if (rc != 0)
 5302				error++;
 5303			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
 5304						 LPFC_RSC_TYPE_FCOE_RPI);
 5305			if (rc != 0)
 5306				error++;
 5307
 5308			/*
 5309			 * It's possible that the number of resources
 5310			 * provided to this port instance changed between
 5311			 * resets. Detect this condition and reallocate
 5312			 * resources. Otherwise, there is no action.
 5313			 */
 5314			if (error) {
 5315				lpfc_printf_log(phba, KERN_INFO,
 5316						LOG_MBOX | LOG_INIT,
 5317						"2931 Detected extent resource "
 5318						"change. Reallocating all "
 5319						"extents.\n");
				/*
				 * NOTE(review): the dealloc return codes are
				 * overwritten and never checked - presumably
				 * best-effort cleanup before the fresh
				 * allocation below; confirm.
				 */
 5320				rc = lpfc_sli4_dealloc_extent(phba,
 5321						 LPFC_RSC_TYPE_FCOE_VFI);
 5322				rc = lpfc_sli4_dealloc_extent(phba,
 5323						 LPFC_RSC_TYPE_FCOE_VPI);
 5324				rc = lpfc_sli4_dealloc_extent(phba,
 5325						 LPFC_RSC_TYPE_FCOE_XRI);
 5326				rc = lpfc_sli4_dealloc_extent(phba,
 5327						 LPFC_RSC_TYPE_FCOE_RPI);
 5328			} else
 5329				return 0;
 5330		}
 5331
	/* Allocate each extent type; any failure aborts the whole setup. */
 5332		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 5333		if (unlikely(rc))
 5334			goto err_exit;
 5335
 5336		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
 5337		if (unlikely(rc))
 5338			goto err_exit;
 5339
 5340		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
 5341		if (unlikely(rc))
 5342			goto err_exit;
 5343
 5344		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
 5345		if (unlikely(rc))
 5346			goto err_exit;
 5347		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
 5348		       LPFC_IDX_RSRC_RDY);
 5349		return rc;
 5350	} else {
 5351		/*
 5352		 * The port does not support resource extents. The XRI, VPI,
 5353		 * VFI, RPI resource ids were determined from READ_CONFIG.
 5354		 * Just allocate the bitmasks and provision the resource id
 5355		 * arrays. If a port reset is active, the resources don't
 5356		 * need any action - just exit.
 5357		 */
 5358		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
 5359		    LPFC_IDX_RSRC_RDY)
 5360			return 0;
 5361
 5362		/* RPIs. */
 5363		count = phba->sli4_hba.max_cfg_param.max_rpi;
 5364		base = phba->sli4_hba.max_cfg_param.rpi_base;
 5365		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
 5366		phba->sli4_hba.rpi_bmask = kzalloc(longs *
 5367						   sizeof(unsigned long),
 5368						   GFP_KERNEL);
 5369		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
 5370			rc = -ENOMEM;
 5371			goto err_exit;
 5372		}
 5373		phba->sli4_hba.rpi_ids = kzalloc(count *
 5374						 sizeof(uint16_t),
 5375						 GFP_KERNEL);
 5376		if (unlikely(!phba->sli4_hba.rpi_ids)) {
 5377			rc = -ENOMEM;
 5378			goto free_rpi_bmask;
 5379		}
 5380
	/* Logical index i maps to physical id (base + i). */
 5381		for (i = 0; i < count; i++)
 5382			phba->sli4_hba.rpi_ids[i] = base + i;
 5383
 5384		/* VPIs. */
 5385		count = phba->sli4_hba.max_cfg_param.max_vpi;
 5386		base = phba->sli4_hba.max_cfg_param.vpi_base;
 5387		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
 5388		phba->vpi_bmask = kzalloc(longs *
 5389					  sizeof(unsigned long),
 5390					  GFP_KERNEL);
 5391		if (unlikely(!phba->vpi_bmask)) {
 5392			rc = -ENOMEM;
 5393			goto free_rpi_ids;
 5394		}
 5395		phba->vpi_ids = kzalloc(count *
 5396					sizeof(uint16_t),
 5397					GFP_KERNEL);
 5398		if (unlikely(!phba->vpi_ids)) {
 5399			rc = -ENOMEM;
 5400			goto free_vpi_bmask;
 5401		}
 5402
 5403		for (i = 0; i < count; i++)
 5404			phba->vpi_ids[i] = base + i;
 5405
 5406		/* XRIs. */
 5407		count = phba->sli4_hba.max_cfg_param.max_xri;
 5408		base = phba->sli4_hba.max_cfg_param.xri_base;
 5409		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
 5410		phba->sli4_hba.xri_bmask = kzalloc(longs *
 5411						   sizeof(unsigned long),
 5412						   GFP_KERNEL);
 5413		if (unlikely(!phba->sli4_hba.xri_bmask)) {
 5414			rc = -ENOMEM;
 5415			goto free_vpi_ids;
 5416		}
 5417		phba->sli4_hba.xri_ids = kzalloc(count *
 5418						 sizeof(uint16_t),
 5419						 GFP_KERNEL);
 5420		if (unlikely(!phba->sli4_hba.xri_ids)) {
 5421			rc = -ENOMEM;
 5422			goto free_xri_bmask;
 5423		}
 5424
 5425		for (i = 0; i < count; i++)
 5426			phba->sli4_hba.xri_ids[i] = base + i;
 5427
 5428		/* VFIs. */
 5429		count = phba->sli4_hba.max_cfg_param.max_vfi;
 5430		base = phba->sli4_hba.max_cfg_param.vfi_base;
 5431		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
 5432		phba->sli4_hba.vfi_bmask = kzalloc(longs *
 5433						   sizeof(unsigned long),
 5434						   GFP_KERNEL);
 5435		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
 5436			rc = -ENOMEM;
 5437			goto free_xri_ids;
 5438		}
 5439		phba->sli4_hba.vfi_ids = kzalloc(count *
 5440						 sizeof(uint16_t),
 5441						 GFP_KERNEL);
 5442		if (unlikely(!phba->sli4_hba.vfi_ids)) {
 5443			rc = -ENOMEM;
 5444			goto free_vfi_bmask;
 5445		}
 5446
 5447		for (i = 0; i < count; i++)
 5448			phba->sli4_hba.vfi_ids[i] = base + i;
 5449
 5450		/*
 5451		 * Mark all resources ready. An HBA reset doesn't need
 5452		 * to reset the initialization.
 5453		 */
 5454		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
 5455		       LPFC_IDX_RSRC_RDY);
 5456		return 0;
 5457	}
 5458
	/*
	 * Error cleanup cascade: each label frees the allocation made just
	 * before the failure point and falls through to release the earlier
	 * allocations in reverse order.
	 */
 5459 free_vfi_bmask:
 5460	kfree(phba->sli4_hba.vfi_bmask);
 5461 free_xri_ids:
 5462	kfree(phba->sli4_hba.xri_ids);
 5463 free_xri_bmask:
 5464	kfree(phba->sli4_hba.xri_bmask);
 5465 free_vpi_ids:
 5466	kfree(phba->vpi_ids);
 5467 free_vpi_bmask:
 5468	kfree(phba->vpi_bmask);
 5469 free_rpi_ids:
 5470	kfree(phba->sli4_hba.rpi_ids);
 5471 free_rpi_bmask:
 5472	kfree(phba->sli4_hba.rpi_bmask);
 5473 err_exit:
 5474	return rc;
 5475}
5476
 5477/**
 5478 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 5479 * @phba: Pointer to HBA context object.
 5480 *
 5481 * This function releases all SLI4 resource identifiers previously
 5482 * provisioned by lpfc_sli4_alloc_resource_identifiers(). For extent-based
 * ports it delegates to lpfc_sli4_dealloc_extent() per resource type;
 * otherwise it frees the id arrays and bitmasks directly and clears the
 * per-type and aggregate resource-ready flags.
 5483 **/
 5484int
 5485lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
 5486{
 5487	if (phba->sli4_hba.extents_in_use) {
 5488		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
 5489		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
 5490		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
 5491		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 5492	} else {
		/*
		 * Non-extent mode: free the id maps and mark each resource
		 * type (and the aggregate flag) not-ready so a subsequent
		 * setup re-provisions them.
		 */
 5493		kfree(phba->vpi_bmask);
 5494		kfree(phba->vpi_ids);
 5495		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 5496		kfree(phba->sli4_hba.xri_bmask);
 5497		kfree(phba->sli4_hba.xri_ids);
 5498		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 5499		kfree(phba->sli4_hba.vfi_bmask);
 5500		kfree(phba->sli4_hba.vfi_ids);
 5501		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 5502		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 5503	}
 5504
 5505	return 0;
 5506}
5507
5508/**
4697 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 5509 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
4698 * @phba: Pointer to HBA context object. 5510 * @phba: Pointer to HBA context object.
4699 * 5511 *
@@ -4715,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4715 struct lpfc_vport *vport = phba->pport; 5527 struct lpfc_vport *vport = phba->pport;
4716 struct lpfc_dmabuf *mp; 5528 struct lpfc_dmabuf *mp;
4717 5529
4718 /*
4719 * TODO: Why does this routine execute these task in a different
4720 * order from probe?
4721 */
4722 /* Perform a PCI function reset to start from clean */ 5530 /* Perform a PCI function reset to start from clean */
4723 rc = lpfc_pci_function_reset(phba); 5531 rc = lpfc_pci_function_reset(phba);
4724 if (unlikely(rc)) 5532 if (unlikely(rc))
@@ -4880,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4880 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 5688 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4881 spin_unlock_irq(&phba->hbalock); 5689 spin_unlock_irq(&phba->hbalock);
4882 5690
5691 /*
5692 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
5693 * calls depends on these resources to complete port setup.
5694 */
5695 rc = lpfc_sli4_alloc_resource_identifiers(phba);
5696 if (rc) {
5697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5698 "2920 Failed to alloc Resource IDs "
5699 "rc = x%x\n", rc);
5700 goto out_free_mbox;
5701 }
5702
4883 /* Read the port's service parameters. */ 5703 /* Read the port's service parameters. */
4884 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 5704 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4885 if (rc) { 5705 if (rc) {
@@ -4920,19 +5740,30 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4920 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5740 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4921 5741
4922 /* Register SGL pool to the device using non-embedded mailbox command */ 5742 /* Register SGL pool to the device using non-embedded mailbox command */
4923 rc = lpfc_sli4_post_sgl_list(phba); 5743 if (!phba->sli4_hba.extents_in_use) {
4924 if (unlikely(rc)) { 5744 rc = lpfc_sli4_post_els_sgl_list(phba);
4925 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5745 if (unlikely(rc)) {
4926 "0582 Error %d during sgl post operation\n", 5746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4927 rc); 5747 "0582 Error %d during els sgl post "
4928 rc = -ENODEV; 5748 "operation\n", rc);
4929 goto out_free_mbox; 5749 rc = -ENODEV;
5750 goto out_free_mbox;
5751 }
5752 } else {
5753 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
5754 if (unlikely(rc)) {
5755 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5756 "2560 Error %d during els sgl post "
5757 "operation\n", rc);
5758 rc = -ENODEV;
5759 goto out_free_mbox;
5760 }
4930 } 5761 }
4931 5762
4932 /* Register SCSI SGL pool to the device */ 5763 /* Register SCSI SGL pool to the device */
4933 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 5764 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4934 if (unlikely(rc)) { 5765 if (unlikely(rc)) {
4935 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5766 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4936 "0383 Error %d during scsi sgl post " 5767 "0383 Error %d during scsi sgl post "
4937 "operation\n", rc); 5768 "operation\n", rc);
4938 /* Some Scsi buffers were moved to the abort scsi list */ 5769 /* Some Scsi buffers were moved to the abort scsi list */
@@ -6479,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6479 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7310 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6480 >> LPFC_FIP_ELS_ID_SHIFT); 7311 >> LPFC_FIP_ELS_ID_SHIFT);
6481 } 7312 }
6482 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi); 7313 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7314 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6483 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7315 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6484 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7316 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6485 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7317 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6628,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6628 iocbq->iocb.ulpContext); 7460 iocbq->iocb.ulpContext);
6629 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6630 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6631 iocbq->vport->vpi + phba->vpi_base); 7463 phba->vpi_ids[iocbq->vport->vpi]);
6632 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7464 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
6633 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7465 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
6634 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7466 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
6635 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7467 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6636 LPFC_WQE_LENLOC_WORD3); 7468 LPFC_WQE_LENLOC_WORD3);
6637 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7469 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6638 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi); 7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6639 command_type = OTHER_COMMAND; 7472 command_type = OTHER_COMMAND;
6640 break; 7473 break;
6641 case CMD_CLOSE_XRI_CN: 7474 case CMD_CLOSE_XRI_CN:
@@ -6734,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6734 return IOCB_ERROR; 7567 return IOCB_ERROR;
6735 break; 7568 break;
6736 } 7569 }
7570
6737 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
6738 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 7572 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
6739 wqe->generic.wqe_com.abort_tag = abort_tag; 7573 wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6781,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6781 return IOCB_BUSY; 7615 return IOCB_BUSY;
6782 } 7616 }
6783 } else { 7617 } else {
6784 sglq = __lpfc_sli_get_sglq(phba, piocb); 7618 sglq = __lpfc_sli_get_sglq(phba, piocb);
6785 if (!sglq) { 7619 if (!sglq) {
6786 if (!(flag & SLI_IOCB_RET_IOCB)) { 7620 if (!(flag & SLI_IOCB_RET_IOCB)) {
6787 __lpfc_sli_ringtx_put(phba, 7621 __lpfc_sli_ringtx_put(phba,
@@ -6794,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6794 } 7628 }
6795 } 7629 }
6796 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 7630 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6797 sglq = NULL; /* These IO's already have an XRI and 7631 /* These IO's already have an XRI and a mapped sgl. */
6798 * a mapped sgl. 7632 sglq = NULL;
6799 */
6800 } else { 7633 } else {
6801 /* This is a continuation of a commandi,(CX) so this 7634 /*
7635 * This is a continuation of a commandi,(CX) so this
6802 * sglq is on the active list 7636 * sglq is on the active list
6803 */ 7637 */
6804 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 7638 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6807,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6807 } 7641 }
6808 7642
6809 if (sglq) { 7643 if (sglq) {
7644 piocb->sli4_lxritag = sglq->sli4_lxritag;
6810 piocb->sli4_xritag = sglq->sli4_xritag; 7645 piocb->sli4_xritag = sglq->sli4_xritag;
6811
6812 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 7646 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6813 return IOCB_ERROR; 7647 return IOCB_ERROR;
6814 } 7648 }
@@ -11456,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11456 LPFC_MBOXQ_t *mbox; 12290 LPFC_MBOXQ_t *mbox;
11457 int rc; 12291 int rc;
11458 uint32_t shdr_status, shdr_add_status; 12292 uint32_t shdr_status, shdr_add_status;
12293 uint32_t mbox_tmo;
11459 union lpfc_sli4_cfg_shdr *shdr; 12294 union lpfc_sli4_cfg_shdr *shdr;
11460 12295
11461 if (xritag == NO_XRI) { 12296 if (xritag == NO_XRI) {
@@ -11489,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11489 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 12324 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
11490 if (!phba->sli4_hba.intr_enable) 12325 if (!phba->sli4_hba.intr_enable)
11491 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12326 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11492 else 12327 else {
11493 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 12328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12329 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12330 }
11494 /* The IOCTL status is embedded in the mailbox subheader. */ 12331 /* The IOCTL status is embedded in the mailbox subheader. */
11495 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 12332 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
11496 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11508,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11508} 12345}
11509 12346
11510/** 12347/**
 12348 * lpfc_sli4_alloc_xri - Allocate an available logical xri.
 12349 * @phba: pointer to lpfc hba data structure.
 12350 *
 12351 * This routine allocates the next free logical xri from the driver's
 12352 * xri bitmask under phba->hbalock and marks it in use, updating the
 12353 * xri usage counters. The index is logical, so the search starts at
 12354 * bit 0 on every call.
 12355 *
 12356 * Return codes
 12357 *	xri index - successful
 12358 *	NO_XRI - no xri is currently available.
 12359 *
 12360 */
 12361uint16_t
 12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 12363{
 12364	unsigned long xri;
 12365
 12366	/*
 12367	 * Fetch the next logical xri. Because this index is logical,
 12368	 * the driver starts at 0 each time.
 12369	 */
	/* Bitmask search and counter updates are serialized by hbalock. */
 12370	spin_lock_irq(&phba->hbalock);
 12371	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
 12372				 phba->sli4_hba.max_cfg_param.max_xri, 0);
 12373	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		/* No free bit found - every xri is currently in use. */
 12374		spin_unlock_irq(&phba->hbalock);
 12375		return NO_XRI;
 12376	} else {
		/* Claim the xri and keep the usage accounting in sync. */
 12377		set_bit(xri, phba->sli4_hba.xri_bmask);
 12378		phba->sli4_hba.max_cfg_param.xri_used++;
 12379		phba->sli4_hba.xri_count++;
 12380	}
 12381
 12382	spin_unlock_irq(&phba->hbalock);
 12383	return xri;
 12384}
12385
 12386/**
 12387 * __lpfc_sli4_free_xri - Release an xri for reuse.
 12388 * @phba: pointer to lpfc hba data structure.
 * @xri: logical xri index to release.
 12389 *
 12390 * This routine is invoked to release an xri to the pool of
 12391 * available xris maintained by the driver. The counters are only
 * decremented when the bit was actually set, so releasing an
 * already-free xri is a no-op.
 * NOTE(review): lock-free variant - the lpfc_sli4_free_xri() wrapper
 * takes phba->hbalock first, so callers of this routine presumably
 * already hold it; confirm.
 12392 **/
 12393void
 12394__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
 12395{
 12396	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
 12397		phba->sli4_hba.xri_count--;
 12398		phba->sli4_hba.max_cfg_param.xri_used--;
 12399	}
 12400}
12401
 12402/**
 12403 * lpfc_sli4_free_xri - Release an xri for reuse.
 12404 * @phba: pointer to lpfc hba data structure.
 * @xri: logical xri index to release.
 12405 *
 12406 * This routine is invoked to release an xri to the pool of
 12407 * available xris maintained by the driver. It acquires phba->hbalock
 * and delegates to __lpfc_sli4_free_xri().
 12408 **/
 12409void
 12410lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
 12411{
 12412	spin_lock_irq(&phba->hbalock);
 12413	__lpfc_sli4_free_xri(phba, xri);
 12414	spin_unlock_irq(&phba->hbalock);
 12415}
12416
12417/**
11511 * lpfc_sli4_next_xritag - Get an xritag for the io 12418 * lpfc_sli4_next_xritag - Get an xritag for the io
11512 * @phba: Pointer to HBA context object. 12419 * @phba: Pointer to HBA context object.
11513 * 12420 *
@@ -11520,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11520uint16_t 12427uint16_t
11521lpfc_sli4_next_xritag(struct lpfc_hba *phba) 12428lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11522{ 12429{
11523 uint16_t xritag; 12430 uint16_t xri_index;
11524 12431
11525 spin_lock_irq(&phba->hbalock); 12432 xri_index = lpfc_sli4_alloc_xri(phba);
11526 xritag = phba->sli4_hba.next_xri; 12433 if (xri_index != NO_XRI)
11527 if ((xritag != (uint16_t) -1) && xritag < 12434 return xri_index;
11528 (phba->sli4_hba.max_cfg_param.max_xri 12435
11529 + phba->sli4_hba.max_cfg_param.xri_base)) { 12436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11530 phba->sli4_hba.next_xri++;
11531 phba->sli4_hba.max_cfg_param.xri_used++;
11532 spin_unlock_irq(&phba->hbalock);
11533 return xritag;
11534 }
11535 spin_unlock_irq(&phba->hbalock);
11536 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11537 "2004 Failed to allocate XRI.last XRITAG is %d" 12437 "2004 Failed to allocate XRI.last XRITAG is %d"
11538 " Max XRI is %d, Used XRI is %d\n", 12438 " Max XRI is %d, Used XRI is %d\n",
11539 phba->sli4_hba.next_xri, 12439 xri_index,
11540 phba->sli4_hba.max_cfg_param.max_xri, 12440 phba->sli4_hba.max_cfg_param.max_xri,
11541 phba->sli4_hba.max_cfg_param.xri_used); 12441 phba->sli4_hba.max_cfg_param.xri_used);
11542 return -1; 12442 return NO_XRI;
11543} 12443}
11544 12444
11545/** 12445/**
11546 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 12446 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
11547 * @phba: pointer to lpfc hba data structure. 12447 * @phba: pointer to lpfc hba data structure.
11548 * 12448 *
11549 * This routine is invoked to post a block of driver's sgl pages to the 12449 * This routine is invoked to post a block of driver's sgl pages to the
@@ -11552,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11552 * stopped. 12452 * stopped.
11553 **/ 12453 **/
11554int 12454int
11555lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 12455lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
11556{ 12456{
11557 struct lpfc_sglq *sglq_entry; 12457 struct lpfc_sglq *sglq_entry;
11558 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12458 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11561,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11561 LPFC_MBOXQ_t *mbox; 12461 LPFC_MBOXQ_t *mbox;
11562 uint32_t reqlen, alloclen, pg_pairs; 12462 uint32_t reqlen, alloclen, pg_pairs;
11563 uint32_t mbox_tmo; 12463 uint32_t mbox_tmo;
11564 uint16_t xritag_start = 0; 12464 uint16_t xritag_start = 0, lxri = 0;
11565 int els_xri_cnt, rc = 0; 12465 int els_xri_cnt, rc = 0;
11566 uint32_t shdr_status, shdr_add_status; 12466 uint32_t shdr_status, shdr_add_status;
11567 union lpfc_sli4_cfg_shdr *shdr; 12467 union lpfc_sli4_cfg_shdr *shdr;
@@ -11578,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11578 return -ENOMEM; 12478 return -ENOMEM;
11579 } 12479 }
11580 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11581 if (!mbox) { 12481 if (!mbox)
11582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11583 "2560 Failed to allocate mbox cmd memory\n");
11584 return -ENOMEM; 12482 return -ENOMEM;
11585 }
11586 12483
11587 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12484 /* Allocate DMA memory and set up the non-embedded mailbox command */
11588 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12485 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11597,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11597 lpfc_sli4_mbox_cmd_free(phba, mbox); 12494 lpfc_sli4_mbox_cmd_free(phba, mbox);
11598 return -ENOMEM; 12495 return -ENOMEM;
11599 } 12496 }
11600 /* Get the first SGE entry from the non-embedded DMA memory */
11601 viraddr = mbox->sge_array->addr[0];
11602
11603 /* Set up the SGL pages in the non-embedded DMA pages */ 12497 /* Set up the SGL pages in the non-embedded DMA pages */
12498 viraddr = mbox->sge_array->addr[0];
11604 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12499 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
11605 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12500 sgl_pg_pairs = &sgl->sgl_pg_pairs;
11606 12501
11607 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 12502 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
11608 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 12503 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
12504
12505 /*
12506 * Assign the sglq a physical xri only if the driver has not
12507 * initialized those resources. A port reset only needs
12508 * the sglq's posted.
12509 */
12510 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12511 LPFC_XRI_RSRC_RDY) {
12512 lxri = lpfc_sli4_next_xritag(phba);
12513 if (lxri == NO_XRI) {
12514 lpfc_sli4_mbox_cmd_free(phba, mbox);
12515 return -ENOMEM;
12516 }
12517 sglq_entry->sli4_lxritag = lxri;
12518 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12519 }
12520
11609 /* Set up the sge entry */ 12521 /* Set up the sge entry */
11610 sgl_pg_pairs->sgl_pg0_addr_lo = 12522 sgl_pg_pairs->sgl_pg0_addr_lo =
11611 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12523 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11615,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11615 cpu_to_le32(putPaddrLow(0)); 12527 cpu_to_le32(putPaddrLow(0));
11616 sgl_pg_pairs->sgl_pg1_addr_hi = 12528 sgl_pg_pairs->sgl_pg1_addr_hi =
11617 cpu_to_le32(putPaddrHigh(0)); 12529 cpu_to_le32(putPaddrHigh(0));
12530
11618 /* Keep the first xritag on the list */ 12531 /* Keep the first xritag on the list */
11619 if (pg_pairs == 0) 12532 if (pg_pairs == 0)
11620 xritag_start = sglq_entry->sli4_xritag; 12533 xritag_start = sglq_entry->sli4_xritag;
11621 sgl_pg_pairs++; 12534 sgl_pg_pairs++;
11622 } 12535 }
12536
12537 /* Complete initialization and perform endian conversion. */
11623 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12538 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
11624 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 12539 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
11625 /* Perform endian conversion if necessary */
11626 sgl->word0 = cpu_to_le32(sgl->word0); 12540 sgl->word0 = cpu_to_le32(sgl->word0);
11627
11628 if (!phba->sli4_hba.intr_enable) 12541 if (!phba->sli4_hba.intr_enable)
11629 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12542 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11630 else { 12543 else {
@@ -11643,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11643 shdr_status, shdr_add_status, rc); 12556 shdr_status, shdr_add_status, rc);
11644 rc = -ENXIO; 12557 rc = -ENXIO;
11645 } 12558 }
12559
12560 if (rc == 0)
12561 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12562 LPFC_XRI_RSRC_RDY);
12563 return rc;
12564}
12565
12566/**
12567 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12568 * @phba: pointer to lpfc hba data structure.
12569 *
12570 * This routine is invoked to post a block of driver's sgl pages to the
12571 * HBA using non-embedded mailbox command. No Lock is held. This routine
12572 * is only called when the driver is loading and after all IO has been
12573 * stopped.
12574 **/
12575int
12576lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12577{
12578 struct lpfc_sglq *sglq_entry;
12579 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12580 struct sgl_page_pairs *sgl_pg_pairs;
12581 void *viraddr;
12582 LPFC_MBOXQ_t *mbox;
12583 uint32_t reqlen, alloclen, index;
12584 uint32_t mbox_tmo;
12585 uint16_t rsrc_start, rsrc_size, els_xri_cnt;
12586 uint16_t xritag_start = 0, lxri = 0;
12587 struct lpfc_rsrc_blks *rsrc_blk;
12588 int cnt, ttl_cnt, rc = 0;
12589 int loop_cnt;
12590 uint32_t shdr_status, shdr_add_status;
12591 union lpfc_sli4_cfg_shdr *shdr;
12592
12593 /* The number of sgls to be posted */
12594 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12595
12596 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12597 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12598 if (reqlen > SLI4_PAGE_SIZE) {
12599 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12600 "2989 Block sgl registration required DMA "
12601 "size (%d) great than a page\n", reqlen);
12602 return -ENOMEM;
12603 }
12604
12605 cnt = 0;
12606 ttl_cnt = 0;
12607 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12608 list) {
12609 rsrc_start = rsrc_blk->rsrc_start;
12610 rsrc_size = rsrc_blk->rsrc_size;
12611
12612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12613 "3014 Working ELS Extent start %d, cnt %d\n",
12614 rsrc_start, rsrc_size);
12615
12616 loop_cnt = min(els_xri_cnt, rsrc_size);
12617 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
12618 loop_cnt = els_xri_cnt - ttl_cnt;
12619 ttl_cnt = els_xri_cnt;
12620 }
12621
12622 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12623 if (!mbox)
12624 return -ENOMEM;
12625 /*
12626 * Allocate DMA memory and set up the non-embedded mailbox
12627 * command.
12628 */
12629 alloclen = lpfc_sli4_config(phba, mbox,
12630 LPFC_MBOX_SUBSYSTEM_FCOE,
12631 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12632 reqlen, LPFC_SLI4_MBX_NEMBED);
12633 if (alloclen < reqlen) {
12634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12635 "2987 Allocated DMA memory size (%d) "
12636 "is less than the requested DMA memory "
12637 "size (%d)\n", alloclen, reqlen);
12638 lpfc_sli4_mbox_cmd_free(phba, mbox);
12639 return -ENOMEM;
12640 }
12641
12642 /* Set up the SGL pages in the non-embedded DMA pages */
12643 viraddr = mbox->sge_array->addr[0];
12644 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12645 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12646
12647 /*
12648 * The starting resource may not begin at zero. Control
12649 * the loop variants via the block resource parameters,
12650 * but handle the sge pointers with a zero-based index
12651 * that doesn't get reset per loop pass.
12652 */
12653 for (index = rsrc_start;
12654 index < rsrc_start + loop_cnt;
12655 index++) {
12656 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
12657
12658 /*
12659 * Assign the sglq a physical xri only if the driver
12660 * has not initialized those resources. A port reset
12661 * only needs the sglq's posted.
12662 */
12663 if (bf_get(lpfc_xri_rsrc_rdy,
12664 &phba->sli4_hba.sli4_flags) !=
12665 LPFC_XRI_RSRC_RDY) {
12666 lxri = lpfc_sli4_next_xritag(phba);
12667 if (lxri == NO_XRI) {
12668 lpfc_sli4_mbox_cmd_free(phba, mbox);
12669 rc = -ENOMEM;
12670 goto err_exit;
12671 }
12672 sglq_entry->sli4_lxritag = lxri;
12673 sglq_entry->sli4_xritag =
12674 phba->sli4_hba.xri_ids[lxri];
12675 }
12676
12677 /* Set up the sge entry */
12678 sgl_pg_pairs->sgl_pg0_addr_lo =
12679 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12680 sgl_pg_pairs->sgl_pg0_addr_hi =
12681 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12682 sgl_pg_pairs->sgl_pg1_addr_lo =
12683 cpu_to_le32(putPaddrLow(0));
12684 sgl_pg_pairs->sgl_pg1_addr_hi =
12685 cpu_to_le32(putPaddrHigh(0));
12686
12687 /* Track the starting physical XRI for the mailbox. */
12688 if (index == rsrc_start)
12689 xritag_start = sglq_entry->sli4_xritag;
12690 sgl_pg_pairs++;
12691 cnt++;
12692 }
12693
12694 /* Complete initialization and perform endian conversion. */
12695 rsrc_blk->rsrc_used += loop_cnt;
12696 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
12697 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
12698 sgl->word0 = cpu_to_le32(sgl->word0);
12699
12700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12701 "3015 Post ELS Extent SGL, start %d, "
12702 "cnt %d, used %d\n",
12703 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
12704 if (!phba->sli4_hba.intr_enable)
12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12706 else {
12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12709 }
12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12711 shdr_status = bf_get(lpfc_mbox_hdr_status,
12712 &shdr->response);
12713 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12714 &shdr->response);
12715 if (rc != MBX_TIMEOUT)
12716 lpfc_sli4_mbox_cmd_free(phba, mbox);
12717 if (shdr_status || shdr_add_status || rc) {
12718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12719 "2988 POST_SGL_BLOCK mailbox "
12720 "command failed status x%x "
12721 "add_status x%x mbx status x%x\n",
12722 shdr_status, shdr_add_status, rc);
12723 rc = -ENXIO;
12724 goto err_exit;
12725 }
12726 if (ttl_cnt >= els_xri_cnt)
12727 break;
12728 }
12729
12730 err_exit:
12731 if (rc == 0)
12732 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12733 LPFC_XRI_RSRC_RDY);
11646 return rc; 12734 return rc;
11647} 12735}
11648 12736
@@ -11703,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11703 lpfc_sli4_mbox_cmd_free(phba, mbox); 12791 lpfc_sli4_mbox_cmd_free(phba, mbox);
11704 return -ENOMEM; 12792 return -ENOMEM;
11705 } 12793 }
12794
11706 /* Get the first SGE entry from the non-embedded DMA memory */ 12795 /* Get the first SGE entry from the non-embedded DMA memory */
11707 viraddr = mbox->sge_array->addr[0]; 12796 viraddr = mbox->sge_array->addr[0];
11708 12797
@@ -11758,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11758} 12847}
11759 12848
/**
 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @cnt: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox commands,
 * one command per XRI resource extent that overlaps the SCSI XRI range.
 * No Lock is held.
 *
 * Return: 0 on success, -ENOMEM on an allocation failure, -ENXIO when the
 * port rejects a POST_SGL_PAGES mailbox command.
 **/
int
lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
				int cnt)
{
	struct lpfc_scsi_buf *psb = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xri_start = 0, scsi_xri_start;
	uint16_t rsrc_range;
	int rc = 0, avail_cnt;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_rsrc_blks *rsrc_blk;
	uint32_t xri_cnt = 0;

	/* Calculate the total requested length of the dma memory */
	reqlen = cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2932 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}

	/*
	 * The use of extents requires the driver to post the sgl headers
	 * in multiple postings to meet the contiguous resource assignment.
	 * psb walks @sblist across extent passes without being reset, so
	 * each pass resumes with the next unposted buffer.
	 */
	psb = list_prepare_entry(psb, sblist, list);
	scsi_xri_start = phba->sli4_hba.scsi_xri_start;
	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
			    list) {
		/* Skip extents below the SCSI XRI range or fully used. */
		rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
		if (rsrc_range < scsi_xri_start)
			continue;
		else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
			continue;
		else
			avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;

		/*
		 * NOTE(review): reqlen is recomputed from avail_cnt here but
		 * is not re-checked against SLI4_PAGE_SIZE, so an extent
		 * larger than @cnt could exceed the bound verified above —
		 * confirm extent sizing makes that impossible.
		 */
		reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
			sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
		/*
		 * Allocate DMA memory and set up the non-embedded mailbox
		 * command. The mbox is used to post an SGL page per loop
		 * but the DMA memory has a use-once semantic so the mailbox
		 * is used and freed per loop pass.
		 */
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2933 Failed to allocate mbox cmd "
					"memory\n");
			return -ENOMEM;
		}
		alloclen = lpfc_sli4_config(phba, mbox,
					LPFC_MBOX_SUBSYSTEM_FCOE,
					LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
					reqlen,
					LPFC_SLI4_MBX_NEMBED);
		if (alloclen < reqlen) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2934 Allocated DMA memory size (%d) "
					"is less than the requested DMA memory "
					"size (%d)\n", alloclen, reqlen);
			lpfc_sli4_mbox_cmd_free(phba, mbox);
			return -ENOMEM;
		}

		/* Get the first SGE entry from the non-embedded DMA memory */
		viraddr = mbox->sge_array->addr[0];

		/* Set up the SGL pages in the non-embedded DMA pages */
		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
		sgl_pg_pairs = &sgl->sgl_pg_pairs;

		/* pg_pairs tracks posted SGEs per loop iteration. */
		pg_pairs = 0;
		list_for_each_entry_continue(psb, sblist, list) {
			/* Set up the sge entry */
			sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
			sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
			/* Second page only when the buffer spans one page. */
			if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
				pdma_phys_bpl1 = psb->dma_phys_bpl +
					SGL_PAGE_SIZE;
			else
				pdma_phys_bpl1 = 0;
			sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
			sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
			/* Keep the first xri for this extent. */
			if (pg_pairs == 0)
				xri_start = psb->cur_iocbq.sli4_xritag;
			sgl_pg_pairs++;
			pg_pairs++;
			xri_cnt++;

			/*
			 * Track two exit conditions - the loop has constructed
			 * all of the caller's SGE pairs or all available
			 * resource IDs in this extent are consumed.
			 */
			if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
				break;
		}
		rsrc_blk->rsrc_used += pg_pairs;
		bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
		bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3016 Post SCSI Extent SGL, start %d, cnt %d "
				"blk use %d\n",
				xri_start, pg_pairs, rsrc_blk->rsrc_used);
		/* Perform endian conversion if necessary */
		sgl->word0 = cpu_to_le32(sgl->word0);
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		else {
			mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
		}
		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		/* On timeout the port still owns the mailbox; do not free. */
		if (rc != MBX_TIMEOUT)
			lpfc_sli4_mbox_cmd_free(phba, mbox);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2935 POST_SGL_BLOCK mailbox command "
					"failed status x%x add_status x%x "
					"mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			return -ENXIO;
		}

		/* Post only what is requested. */
		if (xri_cnt >= cnt)
			break;
	}
	return rc;
}
13011
13012/**
11761 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13013 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
11762 * @phba: pointer to lpfc_hba struct that the frame was received on 13014 * @phba: pointer to lpfc_hba struct that the frame was received on
11763 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13015 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12147,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
12147} 13399}
12148 13400
12149/** 13401/**
13402 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13403 * @phba: Pointer to HBA context object.
13404 * @xri: xri id in transaction.
13405 *
13406 * This function validates the xri maps to the known range of XRIs allocated an
13407 * used by the driver.
13408 **/
13409static uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri)
13412{
13413 int i;
13414
13415 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13416 if (xri == phba->sli4_hba.xri_ids[i])
13417 return i;
13418 }
13419 return NO_XRI;
13420}
13421
13422
13423/**
12150 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13424 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
12151 * @phba: Pointer to HBA context object. 13425 * @phba: Pointer to HBA context object.
12152 * @fc_hdr: pointer to a FC frame header. 13426 * @fc_hdr: pointer to a FC frame header.
@@ -12179,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12179 "SID:x%x\n", oxid, sid); 13453 "SID:x%x\n", oxid, sid);
12180 return; 13454 return;
12181 } 13455 }
12182 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base 13456 if (lpfc_sli4_xri_inrange(phba, rxid))
12183 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
12184 + phba->sli4_hba.max_cfg_param.xri_base))
12185 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 13457 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
12186 13458
12187 /* Allocate buffer for rsp iocb */ 13459 /* Allocate buffer for rsp iocb */
@@ -12204,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12204 icmd->ulpBdeCount = 0; 13476 icmd->ulpBdeCount = 0;
12205 icmd->ulpLe = 1; 13477 icmd->ulpLe = 1;
12206 icmd->ulpClass = CLASS3; 13478 icmd->ulpClass = CLASS3;
12207 icmd->ulpContext = ndlp->nlp_rpi; 13479 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
12208 ctiocb->context1 = ndlp; 13480 ctiocb->context1 = ndlp;
12209 13481
12210 ctiocb->iocb_cmpl = NULL; 13482 ctiocb->iocb_cmpl = NULL;
12211 ctiocb->vport = phba->pport; 13483 ctiocb->vport = phba->pport;
12212 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 13484 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
13485 ctiocb->sli4_lxritag = NO_XRI;
12213 ctiocb->sli4_xritag = NO_XRI; 13486 ctiocb->sli4_xritag = NO_XRI;
12214 13487
12215 /* If the oxid maps to the FCP XRI range or if it is out of range, 13488 /* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12390,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
12390 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
12391 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
12392 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
12393 first_iocbq->iocb.unsli3.rcvsli3.vpi = 13666 /* iocbq is prepped for internal consumption. Logical vpi. */
12394 vport->vpi + vport->phba->vpi_base; 13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
12395 /* put the first buffer into the first IOCBq */ 13668 /* put the first buffer into the first IOCBq */
12396 first_iocbq->context2 = &seq_dmabuf->dbuf; 13669 first_iocbq->context2 = &seq_dmabuf->dbuf;
12397 first_iocbq->context3 = NULL; 13670 first_iocbq->context3 = NULL;
@@ -12471,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
12471 &phba->sli.ring[LPFC_ELS_RING], 13744 &phba->sli.ring[LPFC_ELS_RING],
12472 iocbq, fc_hdr->fh_r_ctl, 13745 iocbq, fc_hdr->fh_r_ctl,
12473 fc_hdr->fh_type)) 13746 fc_hdr->fh_type))
12474 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12475 "2540 Ring %d handler: unexpected Rctl " 13748 "2540 Ring %d handler: unexpected Rctl "
12476 "x%x Type x%x received\n", 13749 "x%x Type x%x received\n",
12477 LPFC_ELS_RING, 13750 LPFC_ELS_RING,
@@ -12568,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12568{ 13841{
12569 struct lpfc_rpi_hdr *rpi_page; 13842 struct lpfc_rpi_hdr *rpi_page;
12570 uint32_t rc = 0; 13843 uint32_t rc = 0;
13844 uint16_t lrpi = 0;
13845
13846 /* SLI4 ports that support extents do not require RPI headers. */
13847 if (!phba->sli4_hba.rpi_hdrs_in_use)
13848 goto exit;
13849 if (phba->sli4_hba.extents_in_use)
13850 return -EIO;
12571 13851
12572 /* Post all rpi memory regions to the port. */
12573 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 13852 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
13853 /*
13854 * Assign the rpi headers a physical rpi only if the driver
13855 * has not initialized those resources. A port reset only
13856 * needs the headers posted.
13857 */
13858 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13859 LPFC_RPI_RSRC_RDY)
13860 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
13861
12574 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 13862 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
12575 if (rc != MBX_SUCCESS) { 13863 if (rc != MBX_SUCCESS) {
12576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12581,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12581 } 13869 }
12582 } 13870 }
12583 13871
13872 exit:
13873 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13874 LPFC_RPI_RSRC_RDY);
12584 return rc; 13875 return rc;
12585} 13876}
12586 13877
@@ -12604,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12604 LPFC_MBOXQ_t *mboxq; 13895 LPFC_MBOXQ_t *mboxq;
12605 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 13896 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
12606 uint32_t rc = 0; 13897 uint32_t rc = 0;
12607 uint32_t mbox_tmo;
12608 uint32_t shdr_status, shdr_add_status; 13898 uint32_t shdr_status, shdr_add_status;
12609 union lpfc_sli4_cfg_shdr *shdr; 13899 union lpfc_sli4_cfg_shdr *shdr;
12610 13900
13901 /* SLI4 ports that support extents do not require RPI headers. */
13902 if (!phba->sli4_hba.rpi_hdrs_in_use)
13903 return rc;
13904 if (phba->sli4_hba.extents_in_use)
13905 return -EIO;
13906
12611 /* The port is notified of the header region via a mailbox command. */ 13907 /* The port is notified of the header region via a mailbox command. */
12612 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13908 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12613 if (!mboxq) { 13909 if (!mboxq) {
@@ -12619,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12619 13915
12620 /* Post all rpi memory regions to the port. */ 13916 /* Post all rpi memory regions to the port. */
12621 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 13917 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
12622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12623 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 13918 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
12624 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 13919 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
12625 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 13920 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
12626 sizeof(struct lpfc_sli4_cfg_mhdr), 13921 sizeof(struct lpfc_sli4_cfg_mhdr),
12627 LPFC_SLI4_MBX_EMBED); 13922 LPFC_SLI4_MBX_EMBED);
12628 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 13923
12629 hdr_tmpl, rpi_page->page_count); 13924
13925 /* Post the physical rpi to the port for this rpi header. */
12630 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 13926 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
12631 rpi_page->start_rpi); 13927 rpi_page->start_rpi);
13928 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
13929 hdr_tmpl, rpi_page->page_count);
13930
12632 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 13931 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
12633 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 13932 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
12634 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13933 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12663,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12663int 13962int
12664lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 13963lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12665{ 13964{
12666 int rpi; 13965 unsigned long rpi;
12667 uint16_t max_rpi, rpi_base, rpi_limit; 13966 uint16_t max_rpi, rpi_limit;
12668 uint16_t rpi_remaining; 13967 uint16_t rpi_remaining, lrpi = 0;
12669 struct lpfc_rpi_hdr *rpi_hdr; 13968 struct lpfc_rpi_hdr *rpi_hdr;
12670 13969
12671 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 13970 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
12672 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
12673 rpi_limit = phba->sli4_hba.next_rpi; 13971 rpi_limit = phba->sli4_hba.next_rpi;
12674 13972
12675 /* 13973 /*
12676 * The valid rpi range is not guaranteed to be zero-based. Start 13974 * Fetch the next logical rpi. Because this index is logical,
12677 * the search at the rpi_base as reported by the port. 13975 * the driver starts at 0 each time.
12678 */ 13976 */
12679 spin_lock_irq(&phba->hbalock); 13977 spin_lock_irq(&phba->hbalock);
12680 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); 13978 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
12681 if (rpi >= rpi_limit || rpi < rpi_base) 13979 if (rpi >= rpi_limit)
12682 rpi = LPFC_RPI_ALLOC_ERROR; 13980 rpi = LPFC_RPI_ALLOC_ERROR;
12683 else { 13981 else {
12684 set_bit(rpi, phba->sli4_hba.rpi_bmask); 13982 set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12688,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12688 13986
12689 /* 13987 /*
12690 * Don't try to allocate more rpi header regions if the device limit 13988 * Don't try to allocate more rpi header regions if the device limit
12691 * on available rpis max has been exhausted. 13989 * has been exhausted.
12692 */ 13990 */
12693 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 13991 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
12694 (phba->sli4_hba.rpi_count >= max_rpi)) { 13992 (phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12697,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12697 } 13995 }
12698 13996
12699 /* 13997 /*
13998 * RPI header postings are not required for SLI4 ports capable of
13999 * extents.
14000 */
14001 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14002 spin_unlock_irq(&phba->hbalock);
14003 return rpi;
14004 }
14005
14006 /*
12700 * If the driver is running low on rpi resources, allocate another 14007 * If the driver is running low on rpi resources, allocate another
12701 * page now. Note that the next_rpi value is used because 14008 * page now. Note that the next_rpi value is used because
12702 * it represents how many are actually in use whereas max_rpi notes 14009 * it represents how many are actually in use whereas max_rpi notes
12703 * how many are supported max by the device. 14010 * how many are supported max by the device.
12704 */ 14011 */
12705 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - 14012 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
12706 phba->sli4_hba.rpi_count;
12707 spin_unlock_irq(&phba->hbalock); 14013 spin_unlock_irq(&phba->hbalock);
12708 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14014 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
12709 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14015 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12712,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12712 "2002 Error Could not grow rpi " 14018 "2002 Error Could not grow rpi "
12713 "count\n"); 14019 "count\n");
12714 } else { 14020 } else {
14021 lrpi = rpi_hdr->start_rpi;
14022 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
12715 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14023 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
12716 } 14024 }
12717 } 14025 }
@@ -12761,6 +14069,8 @@ void
12761lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14069lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
12762{ 14070{
12763 kfree(phba->sli4_hba.rpi_bmask); 14071 kfree(phba->sli4_hba.rpi_bmask);
14072 kfree(phba->sli4_hba.rpi_ids);
14073 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
12764} 14074}
12765 14075
12766/** 14076/**
@@ -13744,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13744 * never happen 15054 * never happen
13745 */ 15055 */
13746 sglq = __lpfc_clear_active_sglq(phba, 15056 sglq = __lpfc_clear_active_sglq(phba,
13747 sglq->sli4_xritag); 15057 sglq->sli4_lxritag);
13748 spin_unlock_irqrestore(&phba->hbalock, iflags); 15058 spin_unlock_irqrestore(&phba->hbalock, iflags);
13749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13750 "2823 txq empty and txq_cnt is %d\n ", 15060 "2823 txq empty and txq_cnt is %d\n ",
@@ -13756,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13756 /* The xri and iocb resources secured, 15066 /* The xri and iocb resources secured,
13757 * attempt to issue request 15067 * attempt to issue request
13758 */ 15068 */
15069 piocbq->sli4_lxritag = sglq->sli4_lxritag;
13759 piocbq->sli4_xritag = sglq->sli4_xritag; 15070 piocbq->sli4_xritag = sglq->sli4_xritag;
13760 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 15071 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
13761 fail_msg = "to convert bpl to sgl"; 15072 fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
52 struct list_head clist; 52 struct list_head clist;
53 struct list_head dlist; 53 struct list_head dlist;
54 uint16_t iotag; /* pre-assigned IO tag */ 54 uint16_t iotag; /* pre-assigned IO tag */
55 uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
55 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 56 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
56 struct lpfc_cq_event cq_event; 57 struct lpfc_cq_event cq_event;
57 58
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 03d25a9d3bf6..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
310 uint16_t vfi_base; 310 uint16_t vfi_base;
311 uint16_t vfi_used; 311 uint16_t vfi_used;
312 uint16_t max_fcfi; 312 uint16_t max_fcfi;
313 uint16_t fcfi_base;
314 uint16_t fcfi_used; 313 uint16_t fcfi_used;
315 uint16_t max_eq; 314 uint16_t max_eq;
316 uint16_t max_rq; 315 uint16_t max_rq;
@@ -449,10 +448,13 @@ struct lpfc_sli4_hba {
449 uint32_t intr_enable; 448 uint32_t intr_enable;
450 struct lpfc_bmbx bmbx; 449 struct lpfc_bmbx bmbx;
451 struct lpfc_max_cfg_param max_cfg_param; 450 struct lpfc_max_cfg_param max_cfg_param;
451 uint16_t extents_in_use; /* must allocate resource extents. */
452 uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
452 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ 453 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
453 uint16_t next_rpi; 454 uint16_t next_rpi;
454 uint16_t scsi_xri_max; 455 uint16_t scsi_xri_max;
455 uint16_t scsi_xri_cnt; 456 uint16_t scsi_xri_cnt;
457 uint16_t scsi_xri_start;
456 struct list_head lpfc_free_sgl_list; 458 struct list_head lpfc_free_sgl_list;
457 struct list_head lpfc_sgl_list; 459 struct list_head lpfc_sgl_list;
458 struct lpfc_sglq **lpfc_els_sgl_array; 460 struct lpfc_sglq **lpfc_els_sgl_array;
@@ -463,7 +465,17 @@ struct lpfc_sli4_hba {
463 struct lpfc_sglq **lpfc_sglq_active_list; 465 struct lpfc_sglq **lpfc_sglq_active_list;
464 struct list_head lpfc_rpi_hdr_list; 466 struct list_head lpfc_rpi_hdr_list;
465 unsigned long *rpi_bmask; 467 unsigned long *rpi_bmask;
468 uint16_t *rpi_ids;
466 uint16_t rpi_count; 469 uint16_t rpi_count;
470 struct list_head lpfc_rpi_blk_list;
471 unsigned long *xri_bmask;
472 uint16_t *xri_ids;
473 uint16_t xri_count;
474 struct list_head lpfc_xri_blk_list;
475 unsigned long *vfi_bmask;
476 uint16_t *vfi_ids;
477 uint16_t vfi_count;
478 struct list_head lpfc_vfi_blk_list;
467 struct lpfc_sli4_flags sli4_flags; 479 struct lpfc_sli4_flags sli4_flags;
468 struct list_head sp_queue_event; 480 struct list_head sp_queue_event;
469 struct list_head sp_cqe_event_pool; 481 struct list_head sp_cqe_event_pool;
@@ -496,6 +508,7 @@ struct lpfc_sglq {
496 enum lpfc_sgl_state state; 508 enum lpfc_sgl_state state;
497 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ 509 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
498 uint16_t iotag; /* pre-assigned IO tag */ 510 uint16_t iotag; /* pre-assigned IO tag */
511 uint16_t sli4_lxritag; /* logical pre-assigned xri. */
499 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 512 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
500 struct sli4_sge *sgl; /* pre-assigned SGL */ 513 struct sli4_sge *sgl; /* pre-assigned SGL */
501 void *virt; /* virtual address. */ 514 void *virt; /* virtual address. */
@@ -510,6 +523,13 @@ struct lpfc_rpi_hdr {
510 uint32_t start_rpi; 523 uint32_t start_rpi;
511}; 524};
512 525
/*
 * One contiguous block (extent) of port-assigned resource ids; kept on
 * the per-type lpfc_*_blk_list heads in struct lpfc_sli4_hba.
 */
struct lpfc_rsrc_blks {
	struct list_head list;
	uint16_t rsrc_start;	/* first physical id in this extent */
	uint16_t rsrc_size;	/* number of ids in this extent */
	uint16_t rsrc_used;	/* ids from this extent already consumed */
};
532
513/* 533/*
514 * SLI4 specific function prototypes 534 * SLI4 specific function prototypes
515 */ 535 */
@@ -549,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
549int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 569int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
550uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 570uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
551int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 571int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
552int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 572int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
573int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
553int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 574int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
575int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
576 int);
554struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 577struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
555struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 578struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
556void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 579void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
83static int 83static int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 int vpi; 86 unsigned long vpi;
87 87
88 spin_lock_irq(&phba->hbalock); 88 spin_lock_irq(&phba->hbalock);
89 /* Start at bit 1 because vpi zero is reserved for the physical port */ 89 /* Start at bit 1 because vpi zero is reserved for the physical port */