author    Naresh Gottumukkala <bgottumukkala@emulex.com>  2013-08-26 05:57:38 -0400
committer Roland Dreier <roland@purestorage.com>          2013-09-03 00:16:21 -0400
commit    43a6b4025c79ded5b44e58ba0db97c29dd38d718 (patch)
tree      b06d87d5d9d31c6f3d3e23db1510ce08415ba03e /drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
parent    45e86b33ec8b33f9ed41d9f9005f9e663018f8f1 (diff)
RDMA/ocrdma: Create IRD queue fix
1) Fix ocrdma_get_num_posted_shift for up to 128 QPs.
2) Create the minimum of dev->attr.max_wqe and the requested WQE count in create_qp.
3) As part of creating the IRD queue, populate it with basic header templates.
4) Make sure all the DB memory allocated to userspace is page aligned.
5) Fix an issue in checking the mmap local cache.
6) Some code cleanup.

Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_verbs.c')
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 97 ++++++++++++++----------
 1 file changed, 57 insertions(+), 40 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5f68dff0d6ca..278b33b628e1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -186,7 +186,7 @@ static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
 
 	mutex_lock(&uctx->mm_list_lock);
 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
-		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
 			continue;
 
 		list_del(&mm->entry);
@@ -204,7 +204,7 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
 
 	mutex_lock(&uctx->mm_list_lock);
 	list_for_each_entry(mm, &uctx->mm_head, entry) {
-		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
 			continue;
 
 		found = true;
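
Review note: in both ocrdma_del_mmap() and ocrdma_search_mmap() the cache predicate flips from || to &&, so an entry is skipped only when both the length and the physical address differ, i.e. it now counts as a hit when either field matches. A minimal userspace sketch of the new match logic (the struct and values are hypothetical stand-ins, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the driver's mmap-cache key */
struct key { uint64_t phy_addr; unsigned long len; };

static bool matches(const struct key *k, uint64_t phy_addr, unsigned long len)
{
	/* new predicate: skip only when BOTH fields differ */
	if (len != k->len && phy_addr != k->phy_addr)
		return false;
	return true;
}

int main(void)
{
	struct key k = { .phy_addr = 0x1000, .len = 4096 };
	printf("%d\n", matches(&k, 0x1000, 8192)); /* 1: address alone now hits */
	printf("%d\n", matches(&k, 0x2000, 8192)); /* 0: nothing matches */
	return 0;
}

The looser match appears deliberate given the PAGE_ALIGN() length changes elsewhere in the patch, which can leave the recorded length rounded differently from the caller's.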
@@ -307,7 +307,10 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
 		dev->nic_info.db_total_size)) &&
 		(len <= dev->nic_info.db_page_size)) {
-		/* doorbell mapping */
+		if (vma->vm_flags & VM_READ)
+			return -EPERM;
+
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 					len, vma->vm_page_prot);
 	} else if (dev->nic_info.dpp_unmapped_len &&
@@ -315,12 +318,13 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
 			dev->nic_info.dpp_unmapped_len)) &&
 		(len <= dev->nic_info.dpp_unmapped_len)) {
-		/* dpp area mapping */
+		if (vma->vm_flags & VM_READ)
+			return -EPERM;
+
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 					len, vma->vm_page_prot);
 	} else {
-		/* queue memory mapping */
 		status = remap_pfn_range(vma, vma->vm_start,
 			vma->vm_pgoff, len, vma->vm_page_prot);
 	}
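
Review note: both BAR branches now refuse any mapping that includes VM_READ, so the doorbell and DPP regions become write-only from userspace. A hedged sketch of what a consumer library's mmap() call would then look like (the fd and offset are placeholders for illustration, not the real uverbs protocol):

#include <sys/mman.h>
#include <sys/types.h>

/* sketch: 'fd' is the opened uverbs device and 'db_offset' the key the
 * driver hands back; both are placeholders here */
static void *map_doorbell(int fd, off_t db_offset, size_t db_page_size)
{
	/* PROT_WRITE only -- adding PROT_READ would trip the new VM_READ check */
	void *db = mmap(NULL, db_page_size, PROT_WRITE,
			MAP_SHARED, fd, db_offset);
	return db == MAP_FAILED ? NULL : db;
}

The doorbell branch also gains pgprot_noncached(), which keeps doorbell writes from being combined or reordered by the CPU, unlike the write-combined DPP area.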
@@ -351,9 +355,9 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
 
 	if (pd->dpp_enabled) {
 		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
-				(pd->id * OCRDMA_DPP_PAGE_SIZE);
+				(pd->id * PAGE_SIZE);
 		status = ocrdma_add_mmap(uctx, dpp_page_addr,
-					 OCRDMA_DPP_PAGE_SIZE);
+					 PAGE_SIZE);
 		if (status)
 			goto dpp_map_err;
 		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
@@ -369,7 +373,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
 
 ucopy_err:
 	if (pd->dpp_enabled)
-		ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
+		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
 	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
 	return status;
@@ -392,10 +396,18 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
 		pd->num_dpp_qp =
 			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
 	}
+retry:
 	status = ocrdma_mbx_alloc_pd(dev, pd);
 	if (status) {
-		kfree(pd);
-		return ERR_PTR(status);
+		/* try for pd with out dpp */
+		if (pd->dpp_enabled) {
+			pd->dpp_enabled = false;
+			pd->num_dpp_qp = 0;
+			goto retry;
+		} else {
+			kfree(pd);
+			return ERR_PTR(status);
+		}
 	}
 
 	if (udata && context) {
@@ -421,9 +433,9 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
 	status = ocrdma_mbx_dealloc_pd(dev, pd);
 	if (pd->uctx) {
 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
-			(pd->id * OCRDMA_DPP_PAGE_SIZE);
+			(pd->id * PAGE_SIZE);
 		if (pd->dpp_enabled)
-			ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE);
+			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
 		usr_db = dev->nic_info.unmapped_db +
 			(pd->id * dev->nic_info.db_page_size);
 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
@@ -693,7 +705,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 
 	memset(&uresp, 0, sizeof(uresp));
 	uresp.cq_id = cq->id;
-	uresp.page_size = cq->len;
+	uresp.page_size = PAGE_ALIGN(cq->len);
 	uresp.num_pages = 1;
 	uresp.max_hw_cqe = cq->max_hw_cqe;
 	uresp.page_addr[0] = cq->pa;
@@ -788,7 +800,8 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 	status = ocrdma_mbx_destroy_cq(dev, cq);
 
 	if (cq->ucontext) {
-		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
+		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+				PAGE_ALIGN(cq->len));
 		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
 				dev->nic_info.db_page_size);
 	}
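
Review note: ocrdma_copy_cq_uresp() above now advertises PAGE_ALIGN(cq->len) to userspace, so the teardown here must delete the mmap entry under the same aligned length or the len != mm->key.len lookup could miss. PAGE_ALIGN() simply rounds up to the next page boundary; a standalone re-implementation of the arithmetic (4 KiB page assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* same arithmetic as the kernel macro of the same name */
#define PAGE_ALIGN(len) (((len) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	printf("%lu\n", PAGE_ALIGN(6000UL)); /* 8192: a 6000-byte CQ takes two pages */
	printf("%lu\n", PAGE_ALIGN(4096UL)); /* 4096: already aligned */
	return 0;
}

The same add/del pairing shows up again below in the QP SQ/RQ and SRQ teardown paths.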
@@ -817,14 +830,17 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
 				  struct ib_qp_init_attr *attrs)
 {
-	if (attrs->qp_type != IB_QPT_GSI &&
-	    attrs->qp_type != IB_QPT_RC &&
-	    attrs->qp_type != IB_QPT_UD) {
+	if ((attrs->qp_type != IB_QPT_GSI) &&
+	    (attrs->qp_type != IB_QPT_RC) &&
+	    (attrs->qp_type != IB_QPT_UC) &&
+	    (attrs->qp_type != IB_QPT_UD)) {
 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
 		       __func__, dev->id, attrs->qp_type);
 		return -EINVAL;
 	}
-	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
+	/* Skip the check for QP1 to support CM size of 128 */
+	if ((attrs->qp_type != IB_QPT_GSI) &&
+	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
 		       __func__, dev->id, attrs->cap.max_send_wr);
 		pr_err("%s(%d) supported send_wr=0x%x\n",
@@ -875,11 +891,9 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
 	/* verify consumer QPs are not trying to use GSI QP's CQ */
 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
 		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
-		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
-		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
-		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
 			       __func__, dev->id);
 			return -EINVAL;
 		}
 	}
@@ -902,13 +916,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
 	uresp.qp_id = qp->id;
 	uresp.sq_dbid = qp->sq.dbid;
 	uresp.num_sq_pages = 1;
-	uresp.sq_page_size = qp->sq.len;
+	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
 	uresp.sq_page_addr[0] = qp->sq.pa;
 	uresp.num_wqe_allocated = qp->sq.max_cnt;
 	if (!srq) {
 		uresp.rq_dbid = qp->rq.dbid;
 		uresp.num_rq_pages = 1;
-		uresp.rq_page_size = qp->rq.len;
+		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
 		uresp.rq_page_addr[0] = qp->rq.pa;
 		uresp.num_rqe_allocated = qp->rq.max_cnt;
 	}
@@ -1043,6 +1057,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 	}
 	qp->dev = dev;
 	ocrdma_set_qp_init_params(qp, pd, attrs);
+	if (udata == NULL)
+		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+					OCRDMA_QP_FAST_REG);
 
 	mutex_lock(&dev->dev_lock);
 	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
@@ -1053,8 +1070,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 
 	/* user space QP's wr_id table are managed in library */
 	if (udata == NULL) {
-		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
-				  OCRDMA_QP_FAST_REG);
 		status = ocrdma_alloc_wr_id_tbl(qp);
 		if (status)
 			goto map_err;
@@ -1290,22 +1305,17 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
 {
-	int free_cnt;
-	if (q->head >= q->tail)
-		free_cnt = (q->max_cnt - q->head) + q->tail;
-	else
-		free_cnt = q->tail - q->head;
-	return free_cnt;
+	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
 }
 
 static int is_hw_sq_empty(struct ocrdma_qp *qp)
 {
-	return (qp->sq.tail == qp->sq.head &&
-		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
+	return (qp->sq.tail == qp->sq.head);
 }
 
 static int is_hw_rq_empty(struct ocrdma_qp *qp)
 {
-	return (qp->rq.tail == qp->rq.head) ? 1 : 0;
+	return (qp->rq.tail == qp->rq.head);
 }
 
 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
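
Review note: the branchy head/tail comparison is replaced by one modular expression. Assuming max_wqe_idx is max_cnt - 1 (an assumption here, though consistent with its later use as a bitmask in the CQE-decode hunks below), the expression computes (tail - head - 1) mod max_cnt: one ring slot stays unused so that a full ring and an empty ring are distinguishable. A standalone mirror of the new function:

#include <stdio.h>

/* mirror of the new ocrdma_hwq_free_cnt(); max_wqe_idx is assumed to be
 * max_cnt - 1 for the sake of the example */
static int free_cnt(int head, int tail, int max_wqe_idx, int max_cnt)
{
	return ((max_wqe_idx - head) + tail) % max_cnt;
}

int main(void)
{
	printf("%d\n", free_cnt(3, 3, 7, 8)); /* 7: empty ring, one slot reserved */
	printf("%d\n", free_cnt(3, 1, 7, 8)); /* 5: two WQEs in flight */
	printf("%d\n", free_cnt(1, 3, 7, 8)); /* 1: wrapped case, six in flight */
	return 0;
}

The old code returned max_cnt (everything free) when head == tail; the modulo form bakes the reserved slot into a single branch-free expression.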
@@ -1456,9 +1466,11 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	mutex_unlock(&dev->dev_lock);
 
 	if (pd->uctx) {
-		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
+		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
+				PAGE_ALIGN(qp->sq.len));
 		if (!qp->srq)
-			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
+			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
+					PAGE_ALIGN(qp->rq.len));
 	}
 
 	ocrdma_del_flush_qp(qp);
@@ -1603,7 +1615,8 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
 	status = ocrdma_mbx_destroy_srq(dev, srq);
 
 	if (srq->pd->uctx)
-		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
+		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
+				PAGE_ALIGN(srq->rq.len));
 
 	kfree(srq->idx_bit_fields);
 	kfree(srq->rqe_wr_id_tbl);
@@ -1650,7 +1663,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
 				    struct ocrdma_sge *sge,
 				    struct ib_send_wr *wr, u32 wqe_size)
 {
-	if (wr->send_flags & IB_SEND_INLINE) {
+	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
 		if (wr->sg_list[0].length > qp->max_inline_data) {
 			pr_err("%s() supported_len=0x%x,\n"
 			       " unspported len req=0x%x\n", __func__,
@@ -1662,6 +1675,8 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
 			wr->sg_list[0].length);
 		hdr->total_len = wr->sg_list[0].length;
 		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+		if (0 == wr->sg_list[0].length)
+			wqe_size += sizeof(struct ocrdma_sge);
 		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
 	} else {
 		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
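
Review note: the previous hunk also stops treating IB_SEND_INLINE as inline for UD QPs, which now fall through to the ordinary SGE path. Here, a zero-length inline post reserves one SGE's worth of space, since roundup(0, OCRDMA_WQE_ALIGN_BYTES) contributes nothing and the WQE would otherwise be undersized. A quick check of the arithmetic, with 16-byte values assumed for both constants (an assumption, not taken from the driver headers):

#include <stdio.h>

#define WQE_ALIGN 16 /* assumed OCRDMA_WQE_ALIGN_BYTES */
#define SGE_SIZE  16 /* assumed sizeof(struct ocrdma_sge) */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int wqe_size = 32;  /* illustrative header portion */
	unsigned int inline_len = 0; /* zero-byte inline send */

	wqe_size += ROUNDUP(inline_len, WQE_ALIGN); /* adds 0 */
	if (inline_len == 0)
		wqe_size += SGE_SIZE; /* the new padding */
	printf("%u\n", wqe_size); /* 48, not 32 */
	return 0;
}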
@@ -2208,7 +2223,8 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 		ocrdma_update_wc(qp, ibwc, tail);
 		*polled = true;
 	}
-	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
+		   OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
 	if (tail != wqe_idx)
 		expand = true; /* Coalesced CQE can't be consumed yet */
 
@@ -2257,7 +2273,8 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
 	u32 wqe_idx;
 
 	srq = get_ocrdma_srq(qp->ibqp.srq);
-	wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
+	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+		   OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
 	spin_lock_irqsave(&srq->q_lock, flags);
 	ocrdma_srq_toggle_bit(srq, wqe_idx);
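
Review note: both CQE decodes now AND the hardware-reported index with max_wqe_idx before using it, so a value in the wider CQE field can no longer index past tables such as srq->rqe_wr_id_tbl[]. The pattern in isolation (shift and mask values illustrative, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buftag_qpn = 0x00230007; /* pretend raw CQE dword */
	uint32_t shift = 16;              /* stand-in for OCRDMA_CQE_BUFTAG_SHIFT */
	uint32_t max_wqe_idx = 0x0f;      /* 16-entry queue: mask = size - 1 */

	uint32_t wqe_idx = (buftag_qpn >> shift) & max_wqe_idx;
	printf("0x%x\n", wqe_idx); /* 0x3 -- without the mask it would be 0x23 */
	return 0;
}

This only works as a mask because the queue sizes are powers of two, which is also what the max_wqe_idx = size - 1 reading of the free-count change above relies on.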