author     James Smart <jsmart2021@gmail.com>        2017-05-15 18:20:45 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>        2017-05-16 21:21:47 -0400
commit     6c621a2229b084da0d926967f84b059a10c26ede
tree       80c0e96ed57c386177a371039d36d3312f9951c5
parent     3c603be9798758dde794daa622e0f7017dbff3a7
scsi: lpfc: Separate NVMET RQ buffer posting from IO resources SGL/iocbq/context
Currently IO resources are mapped 1 to 1 with RQ buffers posted.

Added logic to separate RQE buffers from IO op resources
(sgl/iocbq/context). During initialization, the driver will determine
how many SGLs it will allocate for NVMET (based on what the firmware
reports) and associate a NVMET IOCBq and NVMET context structure with
each one.

Now that hdr/data buffers are immediately reposted back to the RQ, 512
RQEs for each MRQ is sufficient. Also, since NVMET data buffers are now
128 bytes, lpfc_nvmet_mrq_post is not necessary anymore as we will
always post the max (512) buffers per NVMET MRQ.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
 drivers/scsi/lpfc/lpfc.h       |  11
 drivers/scsi/lpfc/lpfc_attr.c  |  11
 drivers/scsi/lpfc/lpfc_crtn.h  |   8
 drivers/scsi/lpfc/lpfc_init.c  |  92
 drivers/scsi/lpfc/lpfc_mem.c   |  73
 drivers/scsi/lpfc/lpfc_nvmet.c | 246
 drivers/scsi/lpfc/lpfc_nvmet.h |   1
 drivers/scsi/lpfc/lpfc_sli.c   |  78
 drivers/scsi/lpfc/lpfc_sli4.h  |   4
 9 files changed, 291 insertions(+), 233 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index c4b38491da8e..72641b1d3ab8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
 	uint32_t buffer_tag;	/* used for tagged queue ring */
 };
 
+struct lpfc_nvmet_ctxbuf {
+	struct list_head list;
+	struct lpfc_nvmet_rcv_ctx *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
 	struct lpfc_dmabuf *elements;
 	uint32_t max_count;
@@ -163,9 +170,6 @@ struct rqb_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint16_t total_size;
 	uint16_t bytes_recv;
-	void *context;
-	struct lpfc_iocbq *iocbq;
-	struct lpfc_sglq *sglq;
 	struct lpfc_queue *hrq;	      /* ptr to associated Header RQ */
 	struct lpfc_queue *drq;	      /* ptr to associated Data RQ */
 };
@@ -777,7 +781,6 @@ struct lpfc_hba {
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_io_channel;
 	uint32_t cfg_nvmet_mrq;
-	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_enable_nvmet;
 	uint32_t cfg_nvme_enable_fb;
 	uint32_t cfg_nvmet_fb_size;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 129d6cd7635b..65264582915a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3316,14 +3316,6 @@ LPFC_ATTR_R(nvmet_mrq,
3316 "Specify number of RQ pairs for processing NVMET cmds"); 3316 "Specify number of RQ pairs for processing NVMET cmds");
3317 3317
3318/* 3318/*
3319 * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
3320 *
3321 */
3322LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
3323 LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
3324 "Specify number of buffers to post on every MRQ");
3325
3326/*
3327 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3319 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3328 * Supported Values: 1 - register just FCP 3320 * Supported Values: 1 - register just FCP
3329 * 3 - register both FCP and NVME 3321 * 3 - register both FCP and NVME
@@ -5158,7 +5150,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_suppress_rsp,
 	&dev_attr_lpfc_nvme_io_channel,
 	&dev_attr_lpfc_nvmet_mrq,
-	&dev_attr_lpfc_nvmet_mrq_post,
 	&dev_attr_lpfc_nvme_enable_fb,
 	&dev_attr_lpfc_nvmet_fb_size,
 	&dev_attr_lpfc_enable_bg,
@@ -6198,7 +6189,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
-	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
 
 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6295,7 +6285,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 		/* Not NVME Target mode.  Turn off Target parameters. */
 		phba->nvmet_support = 0;
 		phba->cfg_nvmet_mrq = 0;
-		phba->cfg_nvmet_mrq_post = 0;
 		phba->cfg_nvmet_fb_size = 0;
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fb7fc48a1324..cc95abd130b4 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -75,6 +75,8 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +248,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-			struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+			    struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			       struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 				     uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 		     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-			struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 26b6a843d32d..86b0b26dfeea 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-			lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 		}
 	}
 
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
-	uint16_t nvmet_xri_cnt, tot_cnt;
+	uint16_t nvmet_xri_cnt;
 	LIST_HEAD(nvmet_sgl_list);
 	int rc;
 
@@ -3389,20 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 	 * update on pci function's nvmet xri-sgl list
 	 */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
 
-	/* Ensure we at least meet the minimun for the system */
-	if (nvmet_xri_cnt < LPFC_NVMET_RQE_DEF_COUNT)
-		nvmet_xri_cnt = LPFC_NVMET_RQE_DEF_COUNT;
-
-	tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	if (nvmet_xri_cnt > tot_cnt) {
-		phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-		nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"6301 NVMET post-sgl count changed to %d\n",
-				phba->cfg_nvmet_mrq_post);
-	}
+	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
@@ -5835,6 +5824,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+
 		/* Fast-path XRI aborted CQ Event work queue list */
 		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}
@@ -6279,7 +6270,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  *
  * This routine is invoked to free the driver's IOCB list and memory.
  **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6307,7 +6298,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  *	0 - successful
  *	other values - error
  **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL;
@@ -8322,46 +8313,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 }
 
 int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-		    struct lpfc_queue *drq, int count)
-{
-	int rc, i;
-	struct lpfc_rqe hrqe;
-	struct lpfc_rqe drqe;
-	struct lpfc_rqb *rqbp;
-	struct rqb_dmabuf *rqb_buffer;
-	LIST_HEAD(rqb_buf_list);
-
-	rqbp = hrq->rqbp;
-	for (i = 0; i < count; i++) {
-		rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-		if (!rqb_buffer)
-			break;
-		rqb_buffer->hrq = hrq;
-		rqb_buffer->drq = drq;
-		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-	}
-	while (!list_empty(&rqb_buf_list)) {
-		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-				 hbuf.list);
-
-		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-		if (rc < 0) {
-			(rqbp->rqb_free_buffer)(phba, rqb_buffer);
-		} else {
-			list_add_tail(&rqb_buffer->hbuf.list,
-				      &rqbp->rqb_buffer_list);
-			rqbp->buffer_count++;
-		}
-	}
-	return 1;
-}
-
-int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {
 	struct lpfc_rqb *rqbp;
@@ -11103,7 +11054,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error, cnt, num;
+	int error;
 	uint32_t cfg_mode, intr_mode;
 
 	/* Allocate memory for HBA structure */
@@ -11137,27 +11088,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_unset_pci_mem_s4;
 	}
 
-	cnt = phba->cfg_iocb_cnt * 1024;
-	if (phba->nvmet_support) {
-		/* Ensure we at least meet the minimun for the system */
-		num = (phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq);
-		if (num < LPFC_NVMET_RQE_DEF_COUNT)
-			num = LPFC_NVMET_RQE_DEF_COUNT;
-		cnt += num;
-	}
-
-	/* Initialize and populate the iocb list per host */
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2821 initialize iocb list %d total %d\n",
-			phba->cfg_iocb_cnt, cnt);
-	error = lpfc_init_iocb_list(phba, cnt);
-
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1413 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s4;
-	}
-
 	INIT_LIST_HEAD(&phba->active_rrq_list);
 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
 
@@ -11166,7 +11096,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1414 Failed to set up driver resource.\n");
-		goto out_free_iocb_list;
+		goto out_unset_driver_resource_s4;
 	}
 
 	/* Get the default values for Model Name and Description */
@@ -11266,8 +11196,6 @@ out_destroy_shost:
 	lpfc_destroy_shost(phba);
 out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-	lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
 	lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 91060afc9721..fcc05a1517c2 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -629,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
 	struct rqb_dmabuf *dma_buf;
-	struct lpfc_iocbq *nvmewqe;
-	union lpfc_wqe128 *wqe;
 
 	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
 	if (!dma_buf)
@@ -651,60 +649,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 		return NULL;
 	}
 	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
-
-	dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-				   GFP_KERNEL);
-	if (!dma_buf->context) {
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		return NULL;
-	}
-
-	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	if (!dma_buf->iocbq) {
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"2621 Ran out of nvmet iocb/WQEs\n");
-		return NULL;
-	}
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-	nvmewqe = dma_buf->iocbq;
-	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-	/* Initialize WQE */
-	memset(wqe, 0, sizeof(union lpfc_wqe));
-	/* Word 7 */
-	bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-	bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-	/* Word 10 */
-	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-	bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-	bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-	dma_buf->iocbq->context1 = NULL;
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
-	dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	if (!dma_buf->sglq) {
-		lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"6132 Ran out of nvmet XRIs\n");
-		return NULL;
-	}
 	return dma_buf;
 }
 
@@ -723,18 +667,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	unsigned long flags;
-
-	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-	dmab->sglq->state = SGL_FREED;
-	dmab->sglq->ndlp = NULL;
-
-	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-	lpfc_sli_release_iocbq(phba, dmab->iocbq);
-	kfree(dmab->context);
 	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
 	pci_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
@@ -822,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
 		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6409 Cannot post to RQ %d: %x %x\n",
+				rqb_entry->hrq->queue_id,
+				rqb_entry->hrq->host_index,
+				rqb_entry->hrq->hba_index);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index dfa7296499cf..fcc77ae0c71c 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -142,7 +142,7 @@ out:
 }
 
 /**
- * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
+ * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
  * @phba: HBA buffer is associated with
  * @ctxp: context to clean up
  * @mp: Buffer to free
@@ -155,24 +155,24 @@ out:
  * Returns: None
  **/
 void
-lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-		   struct lpfc_dmabuf *mp)
+lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 {
-	if (ctxp) {
-		if (ctxp->flag)
-			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-				"6314 rq_post ctx xri x%x flag x%x\n",
-				ctxp->oxid, ctxp->flag);
-
-		if (ctxp->txrdy) {
-			pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
-				      ctxp->txrdy_phys);
-			ctxp->txrdy = NULL;
-			ctxp->txrdy_phys = 0;
-		}
-		ctxp->state = LPFC_NVMET_STE_FREE;
+	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+	unsigned long iflag;
+
+	if (ctxp->txrdy) {
+		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+			      ctxp->txrdy_phys);
+		ctxp->txrdy = NULL;
+		ctxp->txrdy_phys = 0;
 	}
-	lpfc_rq_buf_free(phba, mp);
+	ctxp->state = LPFC_NVMET_STE_FREE;
+
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+	list_add_tail(&ctx_buf->list,
+		      &phba->sli4_hba.lpfc_nvmet_ctx_list);
+	phba->sli4_hba.nvmet_ctx_cnt++;
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
 }
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -718,7 +718,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
 	if (aborting)
 		return;
 
-	lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -739,17 +739,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
 };
 
+void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
+	unsigned long flags;
+
+	list_for_each_entry_safe(
+		ctx_buf, next_ctx_buf,
+		&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
+		spin_lock_irqsave(
+			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+		list_del_init(&ctx_buf->list);
+		spin_unlock_irqrestore(
+			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+		__lpfc_clear_active_sglq(phba,
+					 ctx_buf->sglq->sli4_lxritag);
+		ctx_buf->sglq->state = SGL_FREED;
+		ctx_buf->sglq->ndlp = NULL;
+
+		spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+		list_add_tail(&ctx_buf->sglq->list,
+			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+				       flags);
+
+		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+		kfree(ctx_buf->context);
+	}
+}
+
+int
+lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf;
+	struct lpfc_iocbq *nvmewqe;
+	union lpfc_wqe128 *wqe;
+	int i;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6403 Allocate NVMET resources for %d XRIs\n",
+			phba->sli4_hba.nvmet_xri_cnt);
+
+	/* For all nvmet xris, allocate resources needed to process a
+	 * received command on a per xri basis.
+	 */
+	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
+		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
+		if (!ctx_buf) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6404 Ran out of memory for NVMET\n");
+			return -ENOMEM;
+		}
+
+		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
+					   GFP_KERNEL);
+		if (!ctx_buf->context) {
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6405 Ran out of NVMET "
+					"context memory\n");
+			return -ENOMEM;
+		}
+		ctx_buf->context->ctxbuf = ctx_buf;
+
+		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
+		if (!ctx_buf->iocbq) {
+			kfree(ctx_buf->context);
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6406 Ran out of NVMET iocb/WQEs\n");
+			return -ENOMEM;
+		}
+		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+		nvmewqe = ctx_buf->iocbq;
+		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+		/* Initialize WQE */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* Word 7 */
+		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+		/* Word 10 */
+		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+		ctx_buf->iocbq->context1 = NULL;
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		if (!ctx_buf->sglq) {
+			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+			kfree(ctx_buf->context);
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6407 Ran out of NVMET XRIs\n");
+			return -ENOMEM;
+		}
+		spin_lock(&phba->sli4_hba.nvmet_io_lock);
+		list_add_tail(&ctx_buf->list,
+			      &phba->sli4_hba.lpfc_nvmet_ctx_list);
+		spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+	}
+	phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+	return 0;
+}
+
 int
 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 {
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvmet_fc_port_info pinfo;
-	int error = 0;
+	int error;
 
 	if (phba->targetport)
 		return 0;
 
+	error = lpfc_nvmet_setup_io_context(phba);
+	if (error)
+		return error;
+
 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
@@ -778,13 +889,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 						     &phba->pcidev->dev,
 						     &phba->targetport);
 #else
-	error = -ENOMEM;
+	error = -ENOENT;
 #endif
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 				"6025 Cannot register NVME targetport "
 				"x%x\n", error);
 		phba->targetport = NULL;
+
+		lpfc_nvmet_cleanup_io_context(phba);
+
 	} else {
 		tgtp = (struct lpfc_nvmet_tgtport *)
 			phba->targetport->private;
@@ -874,7 +988,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
-		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 			continue;
 
 		/* Check if we already received a free context call
@@ -895,7 +1009,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
 			lpfc_set_rrq_active(phba, ndlp,
-				ctxp->rqb_buffer->sglq->sli4_lxritag,
+				ctxp->ctxbuf->sglq->sli4_lxritag,
 				rxid, 1);
 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
 		}
@@ -904,8 +1018,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
904 "6318 XB aborted %x flg x%x (%x)\n", 1018 "6318 XB aborted %x flg x%x (%x)\n",
905 ctxp->oxid, ctxp->flag, released); 1019 ctxp->oxid, ctxp->flag, released);
906 if (released) 1020 if (released)
907 lpfc_nvmet_rq_post(phba, ctxp, 1021 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
908 &ctxp->rqb_buffer->hbuf); 1022
909 if (rrq_empty) 1023 if (rrq_empty)
910 lpfc_worker_wake_up(phba); 1024 lpfc_worker_wake_up(phba);
911 return; 1025 return;
@@ -933,7 +1047,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
-		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 			continue;
 
 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -985,6 +1099,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 		init_completion(&tgtp->tport_unreg_done);
 		nvmet_fc_unregister_targetport(phba->targetport);
 		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
 #endif
@@ -1115,15 +1230,18 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
+	struct lpfc_nvmet_ctxbuf *ctx_buf;
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc;
+	unsigned long iflag;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t id;
 #endif
 
+	ctx_buf = NULL;
 	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6157 FCP Drop IO\n");
+				"6157 NVMET FCP Drop IO\n");
 		oxid = 0;
 		size = 0;
 		sid = 0;
@@ -1131,6 +1249,23 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		goto dropit;
 	}
 
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+	if (phba->sli4_hba.nvmet_ctx_cnt) {
+		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+		phba->sli4_hba.nvmet_ctx_cnt--;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+
+	if (!ctx_buf) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6408 No NVMET ctx Drop IO\n");
+		oxid = 0;
+		size = 0;
+		sid = 0;
+		ctxp = NULL;
+		goto dropit;
+	}
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
@@ -1139,16 +1274,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 
-	ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
-	if (ctxp == NULL) {
-		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6158 FCP Drop IO x%x: Alloc\n",
-				oxid);
-		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
-		/* Cannot send ABTS without context */
-		return;
-	}
+	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
 	memset(ctxp, 0, sizeof(ctxp->ctx));
 	ctxp->wqeq = NULL;
 	ctxp->txrdy = NULL;
@@ -1158,9 +1284,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	ctxp->oxid = oxid;
 	ctxp->sid = sid;
 	ctxp->state = LPFC_NVMET_STE_RCV;
-	ctxp->rqb_buffer = nvmebuf;
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
+	ctxp->ctxbuf = ctx_buf;
 	spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1192,6 +1318,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	 * The calling sequence should be:
 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+	 * the NVME command / FC header is stored, so we are free to repost
+	 * the buffer.
 	 */
 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
 				  payload, size);
@@ -1199,6 +1328,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	/* Process FCP command */
 	if (rc == 0) {
 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 		return;
 	}
 
@@ -1213,15 +1343,17 @@ dropit:
 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
 			 oxid, size, sid);
 	if (oxid) {
+		lpfc_nvmet_defer_release(phba, ctxp);
 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 		return;
 	}
 
-	if (nvmebuf) {
-		nvmebuf->iocbq->hba_wqidx = 0;
-		/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
-		lpfc_nvmet_rq_post(phba, ctxp, &nvmebuf->hbuf);
-	}
+	if (ctx_buf)
+		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+
+	if (nvmebuf)
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 #endif
 }
 
@@ -1273,7 +1405,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 			  uint64_t isr_timestamp)
 {
 	if (phba->nvmet_support == 0) {
-		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
 	}
 	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1474,7 +1606,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 	nvmewqe = ctxp->wqeq;
 	if (nvmewqe == NULL) {
 		/* Allocate buffer for  command wqe */
-		nvmewqe = ctxp->rqb_buffer->iocbq;
+		nvmewqe = ctxp->ctxbuf->iocbq;
 		if (nvmewqe == NULL) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 					"6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1501,7 +1633,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		return NULL;
 	}
 
-	sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
 	switch (rsp->op) {
 	case NVMET_FCOP_READDATA:
 	case NVMET_FCOP_READDATA_RSP:
@@ -1851,15 +1983,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 			 wcqe->word0, wcqe->total_data_placed,
 			 result, wcqe->word3);
 
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
 	/*
 	 * if transport has released ctx, then can reuse it. Otherwise,
 	 * will be recycled by transport release call.
 	 */
 	if (released)
-		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
-	cmdwqe->context2 = NULL;
-	cmdwqe->context3 = NULL;
+	/* This is the iocbq for the abort, not the command */
 	lpfc_sli_release_iocbq(phba, cmdwqe);
 
 	/* Since iaab/iaar are NOT set, there is no work left.
@@ -1932,15 +2065,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 			 ctxp->oxid, ctxp->flag, released,
 			 wcqe->word0, wcqe->total_data_placed,
 			 result, wcqe->word3);
+
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
 	/*
 	 * if transport has released ctx, then can reuse it. Otherwise,
 	 * will be recycled by transport release call.
 	 */
 	if (released)
-		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
-
-	cmdwqe->context2 = NULL;
-	cmdwqe->context3 = NULL;
+		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
 	/* Since iaab/iaar are NOT set, there is no work left.
 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -2002,10 +2135,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
 			sid, xri, ctxp->wqeq->sli4_xritag);
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-	if (!ctxp->wqeq) {
-		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
-		ctxp->wqeq->hba_wqidx = 0;
-	}
 
 	ndlp = lpfc_findnode_did(phba->pport, sid);
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2101,7 +2230,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	if (!ctxp->wqeq) {
-		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+		ctxp->wqeq = ctxp->ctxbuf->iocbq;
 		ctxp->wqeq->hba_wqidx = 0;
 	}
 
@@ -2239,7 +2368,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	if (!ctxp->wqeq) {
-		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+		ctxp->wqeq = ctxp->ctxbuf->iocbq;
 		ctxp->wqeq->hba_wqidx = 0;
 	}
 
@@ -2294,6 +2423,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
 	}
 	abts_wqeq = ctxp->wqeq;
 	wqe_abts = &abts_wqeq->wqe;
+
 	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
 
 	spin_lock_irqsave(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 55f2a859dc70..6eb2f5d8d4ed 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -106,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
 	struct rqb_dmabuf *rqb_buffer;
+	struct lpfc_nvmet_ctxbuf *ctxbuf;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint64_t ts_isr_cmd;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 49d5c4700054..d68ee3ee299a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6513,6 +6513,49 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 		       (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
 }
 
+static int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		    struct lpfc_queue *drq, int count)
+{
+	int rc, i;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct lpfc_rqb *rqbp;
+	struct rqb_dmabuf *rqb_buffer;
+	LIST_HEAD(rqb_buf_list);
+
+	rqbp = hrq->rqbp;
+	for (i = 0; i < count; i++) {
+		/* IF RQ is already full, don't bother */
+		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+			break;
+		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+		if (!rqb_buffer)
+			break;
+		rqb_buffer->hrq = hrq;
+		rqb_buffer->drq = drq;
+		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+	}
+	while (!list_empty(&rqb_buf_list)) {
+		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+				 hbuf.list);
+
+		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+		if (rc < 0) {
+			rqbp->rqb_free_buffer(phba, rqb_buffer);
+		} else {
+			list_add_tail(&rqb_buffer->hbuf.list,
+				      &rqbp->rqb_buffer_list);
+			rqbp->buffer_count++;
+		}
+	}
+	return 1;
+}
+
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -6525,7 +6568,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-	int rc, i;
+	int rc, i, cnt;
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_mqe *mqe;
 	uint8_t *vpd;
@@ -6876,6 +6919,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			goto out_destroy_queue;
 		}
 		phba->sli4_hba.nvmet_xri_cnt = rc;
+
+		cnt = phba->cfg_iocb_cnt * 1024;
+		/* We need 1 iocbq for every SGL, for IO processing */
+		cnt += phba->sli4_hba.nvmet_xri_cnt;
+		/* Initialize and populate the iocb list per host */
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2821 initialize iocb list %d total %d\n",
+				phba->cfg_iocb_cnt, cnt);
+		rc = lpfc_init_iocb_list(phba, cnt);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1413 Failed to init iocb list.\n");
+			goto out_destroy_queue;
+		}
+
 		lpfc_nvmet_create_targetport(phba);
 	} else {
 		/* update host scsi xri-sgl sizes and mappings */
@@ -6895,10 +6953,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6895 "and mapping: %d\n", rc); 6953 "and mapping: %d\n", rc);
6896 goto out_destroy_queue; 6954 goto out_destroy_queue;
6897 } 6955 }
6956
6957 cnt = phba->cfg_iocb_cnt * 1024;
6958 /* Initialize and populate the iocb list per host */
6959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6960 "2820 initialize iocb list %d total %d\n",
6961 phba->cfg_iocb_cnt, cnt);
6962 rc = lpfc_init_iocb_list(phba, cnt);
6963 if (rc) {
6964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6965 "6301 Failed to init iocb list.\n");
6966 goto out_destroy_queue;
6967 }
6898 } 6968 }
6899 6969
6900 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 6970 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6901
6902 /* Post initial buffers to all RQs created */ 6971 /* Post initial buffers to all RQs created */
6903 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 6972 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6904 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 6973 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
@@ -6911,7 +6980,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			lpfc_post_rq_buffer(
 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 				phba->sli4_hba.nvmet_mrq_data[i],
-				phba->cfg_nvmet_mrq_post);
+				LPFC_NVMET_RQE_DEF_COUNT);
 		}
 	}
 
@@ -7078,6 +7147,7 @@ out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
 	lpfc_sli4_queue_unset(phba);
 out_destroy_queue:
+	lpfc_free_iocb_list(phba);
 	lpfc_sli4_queue_destroy(phba);
 out_stop_timers:
 	lpfc_stop_hba_timers(phba);
@@ -18731,7 +18801,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		ctxp = pwqe->context2;
-		sglq = ctxp->rqb_buffer->sglq;
+		sglq = ctxp->ctxbuf->sglq;
 		if (pwqe->sli4_xritag == NO_XRI) {
 			pwqe->sli4_lxritag = sglq->sli4_lxritag;
 			pwqe->sli4_xritag = sglq->sli4_xritag;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 422bde85c9f1..19e2f190ea2e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -618,10 +618,12 @@ struct lpfc_sli4_hba {
 	uint16_t scsi_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
+	uint16_t nvmet_ctx_cnt;
 	struct list_head lpfc_els_sgl_list;
 	struct list_head lpfc_abts_els_sgl_list;
 	struct list_head lpfc_nvmet_sgl_list;
 	struct list_head lpfc_abts_nvmet_ctx_list;
+	struct list_head lpfc_nvmet_ctx_list;
 	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_abts_nvme_buf_list;
 	struct lpfc_sglq **lpfc_sglq_active_list;
@@ -662,8 +664,6 @@ struct lpfc_sli4_hba {
 	uint16_t num_online_cpu;
 	uint16_t num_present_cpu;
 	uint16_t curr_disp_cpu;
-
-	uint16_t nvmet_mrq_post_idx;
 };
 
 enum lpfc_sge_type {