about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_verbs.c')
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  135
1 file changed, 49 insertions(+), 86 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b29a4246ef41..dcfbab177faa 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -114,8 +114,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
114 114
115 dev = get_ocrdma_dev(ibdev); 115 dev = get_ocrdma_dev(ibdev);
116 if (port > 1) { 116 if (port > 1) {
117 ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, 117 pr_err("%s(%d) invalid_port=0x%x\n", __func__,
118 dev->id, port); 118 dev->id, port);
119 return -EINVAL; 119 return -EINVAL;
120 } 120 }
121 netdev = dev->nic_info.netdev; 121 netdev = dev->nic_info.netdev;
@@ -155,8 +155,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
155 155
156 dev = get_ocrdma_dev(ibdev); 156 dev = get_ocrdma_dev(ibdev);
157 if (port > 1) { 157 if (port > 1) {
158 ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, 158 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
159 dev->id, port);
160 return -EINVAL; 159 return -EINVAL;
161 } 160 }
162 return 0; 161 return 0;
@@ -398,7 +397,6 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
398 kfree(pd); 397 kfree(pd);
399 return ERR_PTR(status); 398 return ERR_PTR(status);
400 } 399 }
401 atomic_set(&pd->use_cnt, 0);
402 400
403 if (udata && context) { 401 if (udata && context) {
404 status = ocrdma_copy_pd_uresp(pd, context, udata); 402 status = ocrdma_copy_pd_uresp(pd, context, udata);
@@ -419,12 +417,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
419 int status; 417 int status;
420 u64 usr_db; 418 u64 usr_db;
421 419
422 if (atomic_read(&pd->use_cnt)) {
423 ocrdma_err("%s(%d) pd=0x%x is in use.\n",
424 __func__, dev->id, pd->id);
425 status = -EFAULT;
426 goto dealloc_err;
427 }
428 status = ocrdma_mbx_dealloc_pd(dev, pd); 420 status = ocrdma_mbx_dealloc_pd(dev, pd);
429 if (pd->uctx) { 421 if (pd->uctx) {
430 u64 dpp_db = dev->nic_info.dpp_unmapped_addr + 422 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
@@ -436,7 +428,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
436 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); 428 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
437 } 429 }
438 kfree(pd); 430 kfree(pd);
439dealloc_err:
440 return status; 431 return status;
441} 432}
442 433
@@ -450,8 +441,8 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
450 struct ocrdma_dev *dev = pd->dev; 441 struct ocrdma_dev *dev = pd->dev;
451 442
452 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { 443 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
453 ocrdma_err("%s(%d) leaving err, invalid access rights\n", 444 pr_err("%s(%d) leaving err, invalid access rights\n",
454 __func__, dev->id); 445 __func__, dev->id);
455 return ERR_PTR(-EINVAL); 446 return ERR_PTR(-EINVAL);
456 } 447 }
457 448
@@ -474,7 +465,6 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
474 return ERR_PTR(-ENOMEM); 465 return ERR_PTR(-ENOMEM);
475 } 466 }
476 mr->pd = pd; 467 mr->pd = pd;
477 atomic_inc(&pd->use_cnt);
478 mr->ibmr.lkey = mr->hwmr.lkey; 468 mr->ibmr.lkey = mr->hwmr.lkey;
479 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 469 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
480 mr->ibmr.rkey = mr->hwmr.lkey; 470 mr->ibmr.rkey = mr->hwmr.lkey;
@@ -664,7 +654,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
664 if (status) 654 if (status)
665 goto mbx_err; 655 goto mbx_err;
666 mr->pd = pd; 656 mr->pd = pd;
667 atomic_inc(&pd->use_cnt);
668 mr->ibmr.lkey = mr->hwmr.lkey; 657 mr->ibmr.lkey = mr->hwmr.lkey;
669 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 658 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
670 mr->ibmr.rkey = mr->hwmr.lkey; 659 mr->ibmr.rkey = mr->hwmr.lkey;
@@ -689,7 +678,6 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
689 if (mr->hwmr.fr_mr == 0) 678 if (mr->hwmr.fr_mr == 0)
690 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 679 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
691 680
692 atomic_dec(&mr->pd->use_cnt);
693 /* it could be user registered memory. */ 681 /* it could be user registered memory. */
694 if (mr->umem) 682 if (mr->umem)
695 ib_umem_release(mr->umem); 683 ib_umem_release(mr->umem);
@@ -714,8 +702,8 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
714 uresp.phase_change = cq->phase_change ? 1 : 0; 702 uresp.phase_change = cq->phase_change ? 1 : 0;
715 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 703 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
716 if (status) { 704 if (status) {
717 ocrdma_err("%s(%d) copy error cqid=0x%x.\n", 705 pr_err("%s(%d) copy error cqid=0x%x.\n",
718 __func__, cq->dev->id, cq->id); 706 __func__, cq->dev->id, cq->id);
719 goto err; 707 goto err;
720 } 708 }
721 uctx = get_ocrdma_ucontext(ib_ctx); 709 uctx = get_ocrdma_ucontext(ib_ctx);
@@ -752,7 +740,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
752 740
753 spin_lock_init(&cq->cq_lock); 741 spin_lock_init(&cq->cq_lock);
754 spin_lock_init(&cq->comp_handler_lock); 742 spin_lock_init(&cq->comp_handler_lock);
755 atomic_set(&cq->use_cnt, 0);
756 INIT_LIST_HEAD(&cq->sq_head); 743 INIT_LIST_HEAD(&cq->sq_head);
757 INIT_LIST_HEAD(&cq->rq_head); 744 INIT_LIST_HEAD(&cq->rq_head);
758 cq->dev = dev; 745 cq->dev = dev;
@@ -799,9 +786,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
799 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 786 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
800 struct ocrdma_dev *dev = cq->dev; 787 struct ocrdma_dev *dev = cq->dev;
801 788
802 if (atomic_read(&cq->use_cnt))
803 return -EINVAL;
804
805 status = ocrdma_mbx_destroy_cq(dev, cq); 789 status = ocrdma_mbx_destroy_cq(dev, cq);
806 790
807 if (cq->ucontext) { 791 if (cq->ucontext) {
@@ -837,57 +821,56 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
837 if (attrs->qp_type != IB_QPT_GSI && 821 if (attrs->qp_type != IB_QPT_GSI &&
838 attrs->qp_type != IB_QPT_RC && 822 attrs->qp_type != IB_QPT_RC &&
839 attrs->qp_type != IB_QPT_UD) { 823 attrs->qp_type != IB_QPT_UD) {
840 ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n", 824 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
841 __func__, dev->id, attrs->qp_type); 825 __func__, dev->id, attrs->qp_type);
842 return -EINVAL; 826 return -EINVAL;
843 } 827 }
844 if (attrs->cap.max_send_wr > dev->attr.max_wqe) { 828 if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
845 ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n", 829 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
846 __func__, dev->id, attrs->cap.max_send_wr); 830 __func__, dev->id, attrs->cap.max_send_wr);
847 ocrdma_err("%s(%d) supported send_wr=0x%x\n", 831 pr_err("%s(%d) supported send_wr=0x%x\n",
848 __func__, dev->id, dev->attr.max_wqe); 832 __func__, dev->id, dev->attr.max_wqe);
849 return -EINVAL; 833 return -EINVAL;
850 } 834 }
851 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { 835 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
852 ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n", 836 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
853 __func__, dev->id, attrs->cap.max_recv_wr); 837 __func__, dev->id, attrs->cap.max_recv_wr);
854 ocrdma_err("%s(%d) supported recv_wr=0x%x\n", 838 pr_err("%s(%d) supported recv_wr=0x%x\n",
855 __func__, dev->id, dev->attr.max_rqe); 839 __func__, dev->id, dev->attr.max_rqe);
856 return -EINVAL; 840 return -EINVAL;
857 } 841 }
858 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { 842 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
859 ocrdma_err("%s(%d) unsupported inline data size=0x%x" 843 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
860 " requested\n", __func__, dev->id, 844 __func__, dev->id, attrs->cap.max_inline_data);
861 attrs->cap.max_inline_data); 845 pr_err("%s(%d) supported inline data size=0x%x\n",
862 ocrdma_err("%s(%d) supported inline data size=0x%x\n", 846 __func__, dev->id, dev->attr.max_inline_data);
863 __func__, dev->id, dev->attr.max_inline_data);
864 return -EINVAL; 847 return -EINVAL;
865 } 848 }
866 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { 849 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
867 ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n", 850 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
868 __func__, dev->id, attrs->cap.max_send_sge); 851 __func__, dev->id, attrs->cap.max_send_sge);
869 ocrdma_err("%s(%d) supported send_sge=0x%x\n", 852 pr_err("%s(%d) supported send_sge=0x%x\n",
870 __func__, dev->id, dev->attr.max_send_sge); 853 __func__, dev->id, dev->attr.max_send_sge);
871 return -EINVAL; 854 return -EINVAL;
872 } 855 }
873 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { 856 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
874 ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n", 857 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
875 __func__, dev->id, attrs->cap.max_recv_sge); 858 __func__, dev->id, attrs->cap.max_recv_sge);
876 ocrdma_err("%s(%d) supported recv_sge=0x%x\n", 859 pr_err("%s(%d) supported recv_sge=0x%x\n",
877 __func__, dev->id, dev->attr.max_recv_sge); 860 __func__, dev->id, dev->attr.max_recv_sge);
878 return -EINVAL; 861 return -EINVAL;
879 } 862 }
880 /* unprivileged user space cannot create special QP */ 863 /* unprivileged user space cannot create special QP */
881 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { 864 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
882 ocrdma_err 865 pr_err
883 ("%s(%d) Userspace can't create special QPs of type=0x%x\n", 866 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
884 __func__, dev->id, attrs->qp_type); 867 __func__, dev->id, attrs->qp_type);
885 return -EINVAL; 868 return -EINVAL;
886 } 869 }
887 /* allow creating only one GSI type of QP */ 870 /* allow creating only one GSI type of QP */
888 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { 871 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
889 ocrdma_err("%s(%d) GSI special QPs already created.\n", 872 pr_err("%s(%d) GSI special QPs already created.\n",
890 __func__, dev->id); 873 __func__, dev->id);
891 return -EINVAL; 874 return -EINVAL;
892 } 875 }
893 /* verify consumer QPs are not trying to use GSI QP's CQ */ 876 /* verify consumer QPs are not trying to use GSI QP's CQ */
@@ -896,8 +879,8 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
896 (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) || 879 (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
897 (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) || 880 (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
898 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { 881 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
899 ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n", 882 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
900 __func__, dev->id); 883 __func__, dev->id);
901 return -EINVAL; 884 return -EINVAL;
902 } 885 }
903 } 886 }
@@ -949,7 +932,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
949 } 932 }
950 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 933 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
951 if (status) { 934 if (status) {
952 ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id); 935 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
953 goto err; 936 goto err;
954 } 937 }
955 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], 938 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
@@ -1023,15 +1006,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1023 qp->state = OCRDMA_QPS_RST; 1006 qp->state = OCRDMA_QPS_RST;
1024} 1007}
1025 1008
1026static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
1027{
1028 atomic_inc(&pd->use_cnt);
1029 atomic_inc(&qp->sq_cq->use_cnt);
1030 atomic_inc(&qp->rq_cq->use_cnt);
1031 if (qp->srq)
1032 atomic_inc(&qp->srq->use_cnt);
1033 qp->ibqp.qp_num = qp->id;
1034}
1035 1009
1036static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, 1010static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1037 struct ib_qp_init_attr *attrs) 1011 struct ib_qp_init_attr *attrs)
@@ -1099,7 +1073,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1099 goto cpy_err; 1073 goto cpy_err;
1100 } 1074 }
1101 ocrdma_store_gsi_qp_cq(dev, attrs); 1075 ocrdma_store_gsi_qp_cq(dev, attrs);
1102 ocrdma_set_qp_use_cnt(qp, pd); 1076 qp->ibqp.qp_num = qp->id;
1103 mutex_unlock(&dev->dev_lock); 1077 mutex_unlock(&dev->dev_lock);
1104 return &qp->ibqp; 1078 return &qp->ibqp;
1105 1079
@@ -1112,7 +1086,7 @@ mbx_err:
1112 kfree(qp->wqe_wr_id_tbl); 1086 kfree(qp->wqe_wr_id_tbl);
1113 kfree(qp->rqe_wr_id_tbl); 1087 kfree(qp->rqe_wr_id_tbl);
1114 kfree(qp); 1088 kfree(qp);
1115 ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status); 1089 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1116gen_err: 1090gen_err:
1117 return ERR_PTR(status); 1091 return ERR_PTR(status);
1118} 1092}
@@ -1162,10 +1136,10 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1162 spin_unlock_irqrestore(&qp->q_lock, flags); 1136 spin_unlock_irqrestore(&qp->q_lock, flags);
1163 1137
1164 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { 1138 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1165 ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for " 1139 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1166 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", 1140 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1167 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, 1141 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1168 old_qps, new_qps); 1142 old_qps, new_qps);
1169 goto param_err; 1143 goto param_err;
1170 } 1144 }
1171 1145
@@ -1475,11 +1449,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1475 1449
1476 ocrdma_del_flush_qp(qp); 1450 ocrdma_del_flush_qp(qp);
1477 1451
1478 atomic_dec(&qp->pd->use_cnt);
1479 atomic_dec(&qp->sq_cq->use_cnt);
1480 atomic_dec(&qp->rq_cq->use_cnt);
1481 if (qp->srq)
1482 atomic_dec(&qp->srq->use_cnt);
1483 kfree(qp->wqe_wr_id_tbl); 1452 kfree(qp->wqe_wr_id_tbl);
1484 kfree(qp->rqe_wr_id_tbl); 1453 kfree(qp->rqe_wr_id_tbl);
1485 kfree(qp); 1454 kfree(qp);
@@ -1565,14 +1534,12 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1565 goto arm_err; 1534 goto arm_err;
1566 } 1535 }
1567 1536
1568 atomic_set(&srq->use_cnt, 0);
1569 if (udata) { 1537 if (udata) {
1570 status = ocrdma_copy_srq_uresp(srq, udata); 1538 status = ocrdma_copy_srq_uresp(srq, udata);
1571 if (status) 1539 if (status)
1572 goto arm_err; 1540 goto arm_err;
1573 } 1541 }
1574 1542
1575 atomic_inc(&pd->use_cnt);
1576 return &srq->ibsrq; 1543 return &srq->ibsrq;
1577 1544
1578arm_err: 1545arm_err:
@@ -1618,18 +1585,12 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1618 1585
1619 srq = get_ocrdma_srq(ibsrq); 1586 srq = get_ocrdma_srq(ibsrq);
1620 dev = srq->dev; 1587 dev = srq->dev;
1621 if (atomic_read(&srq->use_cnt)) {
1622 ocrdma_err("%s(%d) err, srq=0x%x in use\n",
1623 __func__, dev->id, srq->id);
1624 return -EAGAIN;
1625 }
1626 1588
1627 status = ocrdma_mbx_destroy_srq(dev, srq); 1589 status = ocrdma_mbx_destroy_srq(dev, srq);
1628 1590
1629 if (srq->pd->uctx) 1591 if (srq->pd->uctx)
1630 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len); 1592 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
1631 1593
1632 atomic_dec(&srq->pd->use_cnt);
1633 kfree(srq->idx_bit_fields); 1594 kfree(srq->idx_bit_fields);
1634 kfree(srq->rqe_wr_id_tbl); 1595 kfree(srq->rqe_wr_id_tbl);
1635 kfree(srq); 1596 kfree(srq);
@@ -1677,9 +1638,9 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1677{ 1638{
1678 if (wr->send_flags & IB_SEND_INLINE) { 1639 if (wr->send_flags & IB_SEND_INLINE) {
1679 if (wr->sg_list[0].length > qp->max_inline_data) { 1640 if (wr->sg_list[0].length > qp->max_inline_data) {
1680 ocrdma_err("%s() supported_len=0x%x," 1641 pr_err("%s() supported_len=0x%x,\n"
1681 " unspported len req=0x%x\n", __func__, 1642 " unspported len req=0x%x\n", __func__,
1682 qp->max_inline_data, wr->sg_list[0].length); 1643 qp->max_inline_data, wr->sg_list[0].length);
1683 return -EINVAL; 1644 return -EINVAL;
1684 } 1645 }
1685 memcpy(sge, 1646 memcpy(sge,
@@ -1773,12 +1734,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1773 spin_lock_irqsave(&qp->q_lock, flags); 1734 spin_lock_irqsave(&qp->q_lock, flags);
1774 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { 1735 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1775 spin_unlock_irqrestore(&qp->q_lock, flags); 1736 spin_unlock_irqrestore(&qp->q_lock, flags);
1737 *bad_wr = wr;
1776 return -EINVAL; 1738 return -EINVAL;
1777 } 1739 }
1778 1740
1779 while (wr) { 1741 while (wr) {
1780 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || 1742 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1781 wr->num_sge > qp->sq.max_sges) { 1743 wr->num_sge > qp->sq.max_sges) {
1744 *bad_wr = wr;
1782 status = -ENOMEM; 1745 status = -ENOMEM;
1783 break; 1746 break;
1784 } 1747 }
@@ -1856,7 +1819,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1856 1819
1857static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) 1820static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1858{ 1821{
1859 u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); 1822 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
1860 1823
1861 iowrite32(val, qp->rq_db); 1824 iowrite32(val, qp->rq_db);
1862} 1825}
@@ -2094,8 +2057,8 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2094 break; 2057 break;
2095 default: 2058 default:
2096 ibwc->status = IB_WC_GENERAL_ERR; 2059 ibwc->status = IB_WC_GENERAL_ERR;
2097 ocrdma_err("%s() invalid opcode received = 0x%x\n", 2060 pr_err("%s() invalid opcode received = 0x%x\n",
2098 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2061 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2099 break; 2062 break;
2100 }; 2063 };
2101} 2064}