about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/ocrdma
diff options
context:
space:
mode:
authorNaresh Gottumukkala <bgottumukkala@emulex.com>2013-08-07 03:22:32 -0400
committerRoland Dreier <roland@purestorage.com>2013-08-12 13:58:37 -0400
commitf99b1649dbb6342d618307faef1f214fd54928b9 (patch)
tree29eeeb358a40d6d5021c90cc845e5e329043b832 /drivers/infiniband/hw/ocrdma
parentc095ba7224d8edc71dcef0d655911399a8bd4a3f (diff)
RDMA/ocrdma: Style and redundant code cleanup
Code cleanup and removal of redundant code: 1) redundant initialization removed 2) braces changed as per CodingStyle. 3) redundant checks removed 4) extra braces in return statements removed. 5) removed unused pd pointer from mr. 6) reorganized get_dma_mr() 7) fixed set_av() to return error on invalid sgid index. 8) reference to ocrdma_dev removed from struct ocrdma_pd. Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/ocrdma')
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c75
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c104
4 files changed, 96 insertions, 90 deletions
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index d540180a8e42..5c00600135ed 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -317,7 +317,6 @@ struct ocrdma_mr {
317 struct ib_mr ibmr; 317 struct ib_mr ibmr;
318 struct ib_umem *umem; 318 struct ib_umem *umem;
319 struct ocrdma_hw_mr hwmr; 319 struct ocrdma_hw_mr hwmr;
320 struct ocrdma_pd *pd;
321}; 320};
322 321
323struct ocrdma_ucontext { 322struct ocrdma_ucontext {
@@ -393,7 +392,7 @@ static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
393{ 392{
394 int cqe_valid; 393 int cqe_valid;
395 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; 394 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
396 return ((cqe_valid == cq->phase) ? 1 : 0); 395 return (cqe_valid == cq->phase);
397} 396}
398 397
399static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) 398static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index f4c587c68f64..a6bb3d074d2d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -92,7 +92,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
92 int status; 92 int status;
93 struct ocrdma_ah *ah; 93 struct ocrdma_ah *ah;
94 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 94 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
95 struct ocrdma_dev *dev = pd->dev; 95 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
96 96
97 if (!(attr->ah_flags & IB_AH_GRH)) 97 if (!(attr->ah_flags & IB_AH_GRH))
98 return ERR_PTR(-EINVAL); 98 return ERR_PTR(-EINVAL);
@@ -100,7 +100,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
100 ah = kzalloc(sizeof *ah, GFP_ATOMIC); 100 ah = kzalloc(sizeof *ah, GFP_ATOMIC);
101 if (!ah) 101 if (!ah)
102 return ERR_PTR(-ENOMEM); 102 return ERR_PTR(-ENOMEM);
103 ah->dev = pd->dev; 103 ah->dev = dev;
104 104
105 status = ocrdma_alloc_av(dev, ah); 105 status = ocrdma_alloc_av(dev, ah);
106 if (status) 106 if (status)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0965278dd2ed..eb41a1c9ad69 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -94,7 +94,7 @@ enum cqe_status {
94 94
95static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq) 95static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
96{ 96{
97 return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe)); 97 return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
98} 98}
99 99
100static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq) 100static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
@@ -105,8 +105,7 @@ static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
105static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev) 105static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
106{ 106{
107 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) 107 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
108 ((u8 *) dev->mq.cq.va + 108 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
109 (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
110 109
111 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) 110 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
112 return NULL; 111 return NULL;
@@ -120,9 +119,7 @@ static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
120 119
121static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) 120static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
122{ 121{
123 return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va + 122 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
124 (dev->mq.sq.head *
125 sizeof(struct ocrdma_mqe)));
126} 123}
127 124
128static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) 125static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
@@ -132,8 +129,7 @@ static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
132 129
133static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) 130static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
134{ 131{
135 return (void *)((u8 *) dev->mq.sq.va + 132 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
136 (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
137} 133}
138 134
139enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps) 135enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
@@ -181,7 +177,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
181 177
182static int ocrdma_get_mbx_errno(u32 status) 178static int ocrdma_get_mbx_errno(u32 status)
183{ 179{
184 int err_num = -EFAULT; 180 int err_num;
185 u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >> 181 u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
186 OCRDMA_MBX_RSP_STATUS_SHIFT; 182 OCRDMA_MBX_RSP_STATUS_SHIFT;
187 u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >> 183 u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
@@ -438,9 +434,9 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
438 NULL); 434 NULL);
439 if (!status) { 435 if (!status) {
440 eq->q.id = rsp->vector_eqid & 0xffff; 436 eq->q.id = rsp->vector_eqid & 0xffff;
441 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) 437 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
442 ocrdma_assign_eq_vect_gen2(dev, eq); 438 ocrdma_assign_eq_vect_gen2(dev, eq);
443 else { 439 } else {
444 eq->vector = (rsp->vector_eqid >> 16) & 0xffff; 440 eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
445 dev->nic_info.msix.start_vector += 1; 441 dev->nic_info.msix.start_vector += 1;
446 } 442 }
@@ -746,8 +742,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
746 qp->srq->ibsrq.event_handler(&ib_evt, 742 qp->srq->ibsrq.event_handler(&ib_evt,
747 qp->srq->ibsrq. 743 qp->srq->ibsrq.
748 srq_context); 744 srq_context);
749 } else if (dev_event) 745 } else if (dev_event) {
750 ib_dispatch_event(&ib_evt); 746 ib_dispatch_event(&ib_evt);
747 }
751 748
752} 749}
753 750
@@ -957,9 +954,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
957 rsp = ocrdma_get_mqe_rsp(dev); 954 rsp = ocrdma_get_mqe_rsp(dev);
958 ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); 955 ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
959 if (cqe_status || ext_status) { 956 if (cqe_status || ext_status) {
960 pr_err 957 pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
961 ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", 958 __func__,
962 __func__,
963 (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> 959 (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
964 OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status); 960 OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
965 status = ocrdma_get_mbx_cqe_errno(cqe_status); 961 status = ocrdma_get_mbx_cqe_errno(cqe_status);
@@ -1377,15 +1373,13 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1377 cmd->cmd.pgsz_pgcnt |= hw_pages; 1373 cmd->cmd.pgsz_pgcnt |= hw_pages;
1378 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; 1374 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1379 1375
1380 if (dev->eq_cnt < 0)
1381 goto eq_err;
1382 cq->eqn = ocrdma_bind_eq(dev); 1376 cq->eqn = ocrdma_bind_eq(dev);
1383 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2; 1377 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
1384 cqe_count = cq->len / cqe_size; 1378 cqe_count = cq->len / cqe_size;
1385 if (cqe_count > 1024) 1379 if (cqe_count > 1024) {
1386 /* Set cnt to 3 to indicate more than 1024 cq entries */ 1380 /* Set cnt to 3 to indicate more than 1024 cq entries */
1387 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); 1381 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1388 else { 1382 } else {
1389 u8 count = 0; 1383 u8 count = 0;
1390 switch (cqe_count) { 1384 switch (cqe_count) {
1391 case 256: 1385 case 256:
@@ -1427,7 +1421,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1427 return 0; 1421 return 0;
1428mbx_err: 1422mbx_err:
1429 ocrdma_unbind_eq(dev, cq->eqn); 1423 ocrdma_unbind_eq(dev, cq->eqn);
1430eq_err:
1431 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa); 1424 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1432mem_err: 1425mem_err:
1433 kfree(cmd); 1426 kfree(cmd);
@@ -2057,9 +2050,10 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2057 qp->rq_cq = cq; 2050 qp->rq_cq = cq;
2058 2051
2059 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && 2052 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2060 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) 2053 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
2061 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, 2054 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2062 dpp_cq_id); 2055 dpp_cq_id);
2056 }
2063 2057
2064 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 2058 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2065 if (status) 2059 if (status)
@@ -2108,27 +2102,28 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
2108 struct in6_addr in6; 2102 struct in6_addr in6;
2109 2103
2110 memcpy(&in6, dgid, sizeof in6); 2104 memcpy(&in6, dgid, sizeof in6);
2111 if (rdma_is_multicast_addr(&in6)) 2105 if (rdma_is_multicast_addr(&in6)) {
2112 rdma_get_mcast_mac(&in6, mac_addr); 2106 rdma_get_mcast_mac(&in6, mac_addr);
2113 else if (rdma_link_local_addr(&in6)) 2107 } else if (rdma_link_local_addr(&in6)) {
2114 rdma_get_ll_mac(&in6, mac_addr); 2108 rdma_get_ll_mac(&in6, mac_addr);
2115 else { 2109 } else {
2116 pr_err("%s() fail to resolve mac_addr.\n", __func__); 2110 pr_err("%s() fail to resolve mac_addr.\n", __func__);
2117 return -EINVAL; 2111 return -EINVAL;
2118 } 2112 }
2119 return 0; 2113 return 0;
2120} 2114}
2121 2115
2122static void ocrdma_set_av_params(struct ocrdma_qp *qp, 2116static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2123 struct ocrdma_modify_qp *cmd, 2117 struct ocrdma_modify_qp *cmd,
2124 struct ib_qp_attr *attrs) 2118 struct ib_qp_attr *attrs)
2125{ 2119{
2120 int status;
2126 struct ib_ah_attr *ah_attr = &attrs->ah_attr; 2121 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
2127 union ib_gid sgid; 2122 union ib_gid sgid;
2128 u32 vlan_id; 2123 u32 vlan_id;
2129 u8 mac_addr[6]; 2124 u8 mac_addr[6];
2130 if ((ah_attr->ah_flags & IB_AH_GRH) == 0) 2125 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
2131 return; 2126 return -EINVAL;
2132 cmd->params.tclass_sq_psn |= 2127 cmd->params.tclass_sq_psn |=
2133 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); 2128 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2134 cmd->params.rnt_rc_sl_fl |= 2129 cmd->params.rnt_rc_sl_fl |=
@@ -2138,8 +2133,10 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
2138 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; 2133 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2139 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], 2134 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2140 sizeof(cmd->params.dgid)); 2135 sizeof(cmd->params.dgid));
2141 ocrdma_query_gid(&qp->dev->ibdev, 1, 2136 status = ocrdma_query_gid(&qp->dev->ibdev, 1,
2142 ah_attr->grh.sgid_index, &sgid); 2137 ah_attr->grh.sgid_index, &sgid);
2138 if (status)
2139 return status;
2143 qp->sgid_idx = ah_attr->grh.sgid_index; 2140 qp->sgid_idx = ah_attr->grh.sgid_index;
2144 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); 2141 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2145 ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]); 2142 ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
@@ -2155,6 +2152,7 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
2155 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2152 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2156 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2153 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2157 } 2154 }
2155 return 0;
2158} 2156}
2159 2157
2160static int ocrdma_set_qp_params(struct ocrdma_qp *qp, 2158static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
@@ -2176,9 +2174,11 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2176 cmd->params.qkey = attrs->qkey; 2174 cmd->params.qkey = attrs->qkey;
2177 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID; 2175 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2178 } 2176 }
2179 if (attr_mask & IB_QP_AV) 2177 if (attr_mask & IB_QP_AV) {
2180 ocrdma_set_av_params(qp, cmd, attrs); 2178 status = ocrdma_set_av_params(qp, cmd, attrs);
2181 else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { 2179 if (status)
2180 return status;
2181 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2182 /* set the default mac address for UD, GSI QPs */ 2182 /* set the default mac address for UD, GSI QPs */
2183 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | 2183 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
2184 (qp->dev->nic_info.mac_addr[1] << 8) | 2184 (qp->dev->nic_info.mac_addr[1] << 8) |
@@ -2283,10 +2283,12 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2283 OCRDMA_QP_PARAMS_STATE_SHIFT) & 2283 OCRDMA_QP_PARAMS_STATE_SHIFT) &
2284 OCRDMA_QP_PARAMS_STATE_MASK; 2284 OCRDMA_QP_PARAMS_STATE_MASK;
2285 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID; 2285 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
2286 } else 2286 } else {
2287 cmd->params.max_sge_recv_flags |= 2287 cmd->params.max_sge_recv_flags |=
2288 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & 2288 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2289 OCRDMA_QP_PARAMS_STATE_MASK; 2289 OCRDMA_QP_PARAMS_STATE_MASK;
2290 }
2291
2290 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps); 2292 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
2291 if (status) 2293 if (status)
2292 goto mbx_err; 2294 goto mbx_err;
@@ -2497,9 +2499,9 @@ static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
2497 unsigned long flags = 0; 2499 unsigned long flags = 0;
2498 int num_eq = 0; 2500 int num_eq = 0;
2499 2501
2500 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) 2502 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
2501 flags = IRQF_SHARED; 2503 flags = IRQF_SHARED;
2502 else { 2504 } else {
2503 num_eq = dev->nic_info.msix.num_vectors - 2505 num_eq = dev->nic_info.msix.num_vectors -
2504 dev->nic_info.msix.start_vector; 2506 dev->nic_info.msix.start_vector;
2505 /* minimum two vectors/eq are required for rdma to work. 2507 /* minimum two vectors/eq are required for rdma to work.
@@ -2532,8 +2534,10 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2532 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) { 2534 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
2533 num_eq = 1; 2535 num_eq = 1;
2534 flags = IRQF_SHARED; 2536 flags = IRQF_SHARED;
2535 } else 2537 } else {
2536 num_eq = min_t(u32, num_eq, num_online_cpus()); 2538 num_eq = min_t(u32, num_eq, num_online_cpus());
2539 }
2540
2537 dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL); 2541 dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
2538 if (!dev->qp_eq_tbl) 2542 if (!dev->qp_eq_tbl)
2539 return -ENOMEM; 2543 return -ENOMEM;
@@ -2561,8 +2565,7 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2561 /* one eq is sufficient for data path to work */ 2565 /* one eq is sufficient for data path to work */
2562 if (dev->eq_cnt >= 1) 2566 if (dev->eq_cnt >= 1)
2563 return 0; 2567 return 0;
2564 if (status) 2568 ocrdma_destroy_qp_eqs(dev);
2565 ocrdma_destroy_qp_eqs(dev);
2566 return status; 2569 return status;
2567} 2570}
2568 2571
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index f36630e4b6be..77fc50a5cc93 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -337,20 +337,21 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
337 u32 db_page_size; 337 u32 db_page_size;
338 struct ocrdma_alloc_pd_uresp rsp; 338 struct ocrdma_alloc_pd_uresp rsp;
339 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); 339 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
340 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
340 341
341 memset(&rsp, 0, sizeof(rsp)); 342 memset(&rsp, 0, sizeof(rsp));
342 rsp.id = pd->id; 343 rsp.id = pd->id;
343 rsp.dpp_enabled = pd->dpp_enabled; 344 rsp.dpp_enabled = pd->dpp_enabled;
344 db_page_addr = pd->dev->nic_info.unmapped_db + 345 db_page_addr = dev->nic_info.unmapped_db +
345 (pd->id * pd->dev->nic_info.db_page_size); 346 (pd->id * dev->nic_info.db_page_size);
346 db_page_size = pd->dev->nic_info.db_page_size; 347 db_page_size = dev->nic_info.db_page_size;
347 348
348 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); 349 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
349 if (status) 350 if (status)
350 return status; 351 return status;
351 352
352 if (pd->dpp_enabled) { 353 if (pd->dpp_enabled) {
353 dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr + 354 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
354 (pd->id * OCRDMA_DPP_PAGE_SIZE); 355 (pd->id * OCRDMA_DPP_PAGE_SIZE);
355 status = ocrdma_add_mmap(uctx, dpp_page_addr, 356 status = ocrdma_add_mmap(uctx, dpp_page_addr,
356 OCRDMA_DPP_PAGE_SIZE); 357 OCRDMA_DPP_PAGE_SIZE);
@@ -386,10 +387,9 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
386 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 387 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
387 if (!pd) 388 if (!pd)
388 return ERR_PTR(-ENOMEM); 389 return ERR_PTR(-ENOMEM);
389 pd->dev = dev;
390 if (udata && context) { 390 if (udata && context) {
391 pd->dpp_enabled = (dev->nic_info.dev_family == 391 pd->dpp_enabled =
392 OCRDMA_GEN2_FAMILY) ? true : false; 392 (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
393 pd->num_dpp_qp = 393 pd->num_dpp_qp =
394 pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; 394 pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
395 } 395 }
@@ -414,7 +414,7 @@ err:
414int ocrdma_dealloc_pd(struct ib_pd *ibpd) 414int ocrdma_dealloc_pd(struct ib_pd *ibpd)
415{ 415{
416 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 416 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
417 struct ocrdma_dev *dev = pd->dev; 417 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
418 int status; 418 int status;
419 u64 usr_db; 419 u64 usr_db;
420 420
@@ -432,25 +432,12 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
432 return status; 432 return status;
433} 433}
434 434
435static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, 435static int ocrdma_alloc_lkey(struct ocrdma_mr *mr, u32 pdid, int acc,
436 int acc, u32 num_pbls, 436 u32 num_pbls, u32 addr_check)
437 u32 addr_check)
438{ 437{
439 int status; 438 int status;
440 struct ocrdma_mr *mr; 439 struct ocrdma_dev *dev = mr->hwmr.dev;
441 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
442 struct ocrdma_dev *dev = pd->dev;
443
444 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
445 pr_err("%s(%d) leaving err, invalid access rights\n",
446 __func__, dev->id);
447 return ERR_PTR(-EINVAL);
448 }
449 440
450 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
451 if (!mr)
452 return ERR_PTR(-ENOMEM);
453 mr->hwmr.dev = dev;
454 mr->hwmr.fr_mr = 0; 441 mr->hwmr.fr_mr = 0;
455 mr->hwmr.local_rd = 1; 442 mr->hwmr.local_rd = 1;
456 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; 443 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
@@ -460,25 +447,39 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
460 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 447 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
461 mr->hwmr.num_pbls = num_pbls; 448 mr->hwmr.num_pbls = num_pbls;
462 449
463 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check); 450 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
464 if (status) { 451 if (status)
465 kfree(mr); 452 return status;
466 return ERR_PTR(-ENOMEM); 453
467 }
468 mr->pd = pd;
469 mr->ibmr.lkey = mr->hwmr.lkey; 454 mr->ibmr.lkey = mr->hwmr.lkey;
470 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 455 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
471 mr->ibmr.rkey = mr->hwmr.lkey; 456 mr->ibmr.rkey = mr->hwmr.lkey;
472 return mr; 457 return 0;
473} 458}
474 459
475struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) 460struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
476{ 461{
462 int status;
477 struct ocrdma_mr *mr; 463 struct ocrdma_mr *mr;
464 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
465 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
466
467 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
468 pr_err("%s err, invalid access rights\n", __func__);
469 return ERR_PTR(-EINVAL);
470 }
478 471
479 mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE); 472 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
480 if (IS_ERR(mr)) 473 if (!mr)
481 return ERR_CAST(mr); 474 return ERR_PTR(-ENOMEM);
475
476 mr->hwmr.dev = dev;
477 status = ocrdma_alloc_lkey(mr, pd->id, acc, 0,
478 OCRDMA_ADDR_CHECK_DISABLE);
479 if (status) {
480 kfree(mr);
481 return ERR_PTR(status);
482 }
482 483
483 return &mr->ibmr; 484 return &mr->ibmr;
484} 485}
@@ -613,13 +614,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
613 u64 usr_addr, int acc, struct ib_udata *udata) 614 u64 usr_addr, int acc, struct ib_udata *udata)
614{ 615{
615 int status = -ENOMEM; 616 int status = -ENOMEM;
616 struct ocrdma_dev *dev; 617 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
617 struct ocrdma_mr *mr; 618 struct ocrdma_mr *mr;
618 struct ocrdma_pd *pd; 619 struct ocrdma_pd *pd;
619 u32 num_pbes; 620 u32 num_pbes;
620 621
621 pd = get_ocrdma_pd(ibpd); 622 pd = get_ocrdma_pd(ibpd);
622 dev = pd->dev;
623 623
624 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) 624 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
625 return ERR_PTR(-EINVAL); 625 return ERR_PTR(-EINVAL);
@@ -654,7 +654,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
654 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); 654 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
655 if (status) 655 if (status)
656 goto mbx_err; 656 goto mbx_err;
657 mr->pd = pd;
658 mr->ibmr.lkey = mr->hwmr.lkey; 657 mr->ibmr.lkey = mr->hwmr.lkey;
659 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 658 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
660 mr->ibmr.rkey = mr->hwmr.lkey; 659 mr->ibmr.rkey = mr->hwmr.lkey;
@@ -1026,7 +1025,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1026 int status; 1025 int status;
1027 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1026 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1028 struct ocrdma_qp *qp; 1027 struct ocrdma_qp *qp;
1029 struct ocrdma_dev *dev = pd->dev; 1028 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1030 struct ocrdma_create_qp_ureq ureq; 1029 struct ocrdma_create_qp_ureq ureq;
1031 u16 dpp_credit_lmt, dpp_offset; 1030 u16 dpp_credit_lmt, dpp_offset;
1032 1031
@@ -1360,17 +1359,18 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1360 */ 1359 */
1361 discard_cnt += 1; 1360 discard_cnt += 1;
1362 cqe->cmn.qpn = 0; 1361 cqe->cmn.qpn = 0;
1363 if (is_cqe_for_sq(cqe)) 1362 if (is_cqe_for_sq(cqe)) {
1364 ocrdma_hwq_inc_tail(&qp->sq); 1363 ocrdma_hwq_inc_tail(&qp->sq);
1365 else { 1364 } else {
1366 if (qp->srq) { 1365 if (qp->srq) {
1367 spin_lock_irqsave(&qp->srq->q_lock, flags); 1366 spin_lock_irqsave(&qp->srq->q_lock, flags);
1368 ocrdma_hwq_inc_tail(&qp->srq->rq); 1367 ocrdma_hwq_inc_tail(&qp->srq->rq);
1369 ocrdma_srq_toggle_bit(qp->srq, cur_getp); 1368 ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1370 spin_unlock_irqrestore(&qp->srq->q_lock, flags); 1369 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1371 1370
1372 } else 1371 } else {
1373 ocrdma_hwq_inc_tail(&qp->rq); 1372 ocrdma_hwq_inc_tail(&qp->rq);
1373 }
1374 } 1374 }
1375skip_cqe: 1375skip_cqe:
1376 cur_getp = (cur_getp + 1) % cq->max_hw_cqe; 1376 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
@@ -1495,7 +1495,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1495{ 1495{
1496 int status = -ENOMEM; 1496 int status = -ENOMEM;
1497 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1497 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1498 struct ocrdma_dev *dev = pd->dev; 1498 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1499 struct ocrdma_srq *srq; 1499 struct ocrdma_srq *srq;
1500 1500
1501 if (init_attr->attr.max_sge > dev->attr.max_recv_sge) 1501 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
@@ -1675,8 +1675,9 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1675 ocrdma_build_ud_hdr(qp, hdr, wr); 1675 ocrdma_build_ud_hdr(qp, hdr, wr);
1676 sge = (struct ocrdma_sge *)(hdr + 2); 1676 sge = (struct ocrdma_sge *)(hdr + 2);
1677 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); 1677 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1678 } else 1678 } else {
1679 sge = (struct ocrdma_sge *)(hdr + 1); 1679 sge = (struct ocrdma_sge *)(hdr + 1);
1680 }
1680 1681
1681 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 1682 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1682 return status; 1683 return status;
@@ -1958,7 +1959,7 @@ int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1958 1959
1959static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) 1960static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
1960{ 1961{
1961 enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR; 1962 enum ib_wc_status ibwc_status;
1962 1963
1963 switch (status) { 1964 switch (status) {
1964 case OCRDMA_CQE_GENERAL_ERR: 1965 case OCRDMA_CQE_GENERAL_ERR:
@@ -2299,9 +2300,9 @@ static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2299 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); 2300 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2300 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; 2301 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2301 } 2302 }
2302 if (qp->ibqp.srq) 2303 if (qp->ibqp.srq) {
2303 ocrdma_update_free_srq_cqe(ibwc, cqe, qp); 2304 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2304 else { 2305 } else {
2305 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2306 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2306 ocrdma_hwq_inc_tail(&qp->rq); 2307 ocrdma_hwq_inc_tail(&qp->rq);
2307 } 2308 }
@@ -2314,13 +2315,14 @@ static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2314 bool expand = false; 2315 bool expand = false;
2315 2316
2316 ibwc->wc_flags = 0; 2317 ibwc->wc_flags = 0;
2317 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) 2318 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2318 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2319 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2319 OCRDMA_CQE_UD_STATUS_MASK) >> 2320 OCRDMA_CQE_UD_STATUS_MASK) >>
2320 OCRDMA_CQE_UD_STATUS_SHIFT; 2321 OCRDMA_CQE_UD_STATUS_SHIFT;
2321 else 2322 } else {
2322 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2323 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2323 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2324 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2325 }
2324 2326
2325 if (status == OCRDMA_CQE_SUCCESS) { 2327 if (status == OCRDMA_CQE_SUCCESS) {
2326 *polled = true; 2328 *polled = true;
@@ -2338,9 +2340,10 @@ static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2338 if (cq->phase_change) { 2340 if (cq->phase_change) {
2339 if (cur_getp == 0) 2341 if (cur_getp == 0)
2340 cq->phase = (~cq->phase & OCRDMA_CQE_VALID); 2342 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2341 } else 2343 } else {
2342 /* clear valid bit */ 2344 /* clear valid bit */
2343 cqe->flags_status_srcqpn = 0; 2345 cqe->flags_status_srcqpn = 0;
2346 }
2344} 2347}
2345 2348
2346static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, 2349static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
@@ -2417,8 +2420,9 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2417 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { 2420 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2418 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2421 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2419 ocrdma_hwq_inc_tail(&qp->rq); 2422 ocrdma_hwq_inc_tail(&qp->rq);
2420 } else 2423 } else {
2421 return err_cqes; 2424 return err_cqes;
2425 }
2422 ibwc->byte_len = 0; 2426 ibwc->byte_len = 0;
2423 ibwc->status = IB_WC_WR_FLUSH_ERR; 2427 ibwc->status = IB_WC_WR_FLUSH_ERR;
2424 ibwc = ibwc + 1; 2428 ibwc = ibwc + 1;