Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_verbs.c')
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_verbs.c	194
1 file changed, 157 insertions(+), 37 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index fb8d8c4dfbb9..877175563634 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
 	dev = get_ocrdma_dev(ibdev);
 	memset(sgid, 0, sizeof(*sgid));
-	if (index > OCRDMA_MAX_SGID)
+	if (index >= OCRDMA_MAX_SGID)
 		return -EINVAL;
 
 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,117 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
 	return found;
 }
 
+
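+/* Claim the first free slot in the chosen pool and track the pool's
+ * high-water mark; the caller must hold dev->dev_lock.
+ */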
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+	u16 pd_bitmap_idx = 0;
+	const unsigned long *pd_bitmap;
+
+	if (dpp_pool) {
+		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+						    dev->pd_mgr->max_dpp_pd);
+		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+		dev->pd_mgr->pd_dpp_count++;
+		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+	} else {
+		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+						    dev->pd_mgr->max_normal_pd);
+		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+		dev->pd_mgr->pd_norm_count++;
+		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+	}
+	return pd_bitmap_idx;
+}
+
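+/* Return a PD slot to its pool; -EINVAL if the pool is already empty
+ * or pd_id falls outside it. The caller must hold dev->dev_lock.
+ */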
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+				     bool dpp_pool)
+{
+	u16 pd_count;
+	u16 pd_bit_index;
+
+	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+			      dev->pd_mgr->pd_norm_count;
+	if (pd_count == 0)
+		return -EINVAL;
+
+	if (dpp_pool) {
+		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+			return -EINVAL;
+		} else {
+			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+			dev->pd_mgr->pd_dpp_count--;
+		}
+	} else {
+		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+			return -EINVAL;
+		} else {
+			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+			dev->pd_mgr->pd_norm_count--;
+		}
+	}
+
+	return 0;
+}
+
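+/* Locked wrapper around _ocrdma_pd_mgr_put_bitmap(). */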
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+			     bool dpp_pool)
+{
+	int status;
+
+	mutex_lock(&dev->dev_lock);
+	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+	mutex_unlock(&dev->dev_lock);
+	return status;
+}
+
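+/* Pick a PD number from the pre-allocated ranges; DPP requests fall
+ * back to the normal pool (clearing dpp_enabled) if DPP is exhausted.
+ */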
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+	u16 pd_idx = 0;
+	int status = 0;
+
+	mutex_lock(&dev->dev_lock);
+	if (pd->dpp_enabled) {
+		/* try allocating DPP PD, if not available then normal PD */
+		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+		} else if (dev->pd_mgr->pd_norm_count <
+			   dev->pd_mgr->max_normal_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+			pd->dpp_enabled = false;
+		} else {
+			status = -EINVAL;
+		}
+	} else {
+		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+		} else {
+			status = -EINVAL;
+		}
+	}
+	mutex_unlock(&dev->dev_lock);
+	return status;
+}
+
 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
 					  struct ocrdma_ucontext *uctx,
 					  struct ib_udata *udata)
@@ -272,6 +383,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
 					   dev->attr.wqe_size) : 0;
 	}
 
+	if (dev->pd_mgr->pd_prealloc_valid) {
+		status = ocrdma_get_pd_num(dev, pd);
+		return (status == 0) ? pd : ERR_PTR(status);
+	}
+
 retry:
 	status = ocrdma_mbx_alloc_pd(dev, pd);
 	if (status) {
@@ -299,7 +415,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
 {
 	int status = 0;
 
-	status = ocrdma_mbx_dealloc_pd(dev, pd);
+	if (dev->pd_mgr->pd_prealloc_valid)
+		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+	else
+		status = ocrdma_mbx_dealloc_pd(dev, pd);
+
 	kfree(pd);
 	return status;
 }
@@ -325,7 +445,6 @@ err:
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-	int status = 0;
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -334,8 +453,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 		       __func__, dev->id, pd->id);
 	}
 	uctx->cntxt_pd = NULL;
-	status = _ocrdma_dealloc_pd(dev, pd);
-	return status;
+	(void)_ocrdma_dealloc_pd(dev, pd);
+	return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +688,6 @@ err:
 	if (is_uctx_pd) {
 		ocrdma_release_ucontext_pd(uctx);
 	} else {
-		status = ocrdma_mbx_dealloc_pd(dev, pd);
-		kfree(pd);
+		status = _ocrdma_dealloc_pd(dev, pd);
 	}
 exit:
@@ -837,9 +955,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-	int status;
 
-	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -850,11 +967,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 
 	/* Don't stop cleanup, in case FW is unresponsive */
 	if (dev->mqe_ctx.fw_error_state) {
-		status = 0;
 		pr_err("%s(%d) fw not responding.\n",
 		       __func__, dev->id);
 	}
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1102,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-	int status;
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1118,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 	synchronize_irq(irq);
 	ocrdma_flush_cq(cq);
 
-	status = ocrdma_mbx_destroy_cq(dev, cq);
+	(void)ocrdma_mbx_destroy_cq(dev, cq);
 	if (cq->ucontext) {
 		pdid = cq->ucontext->cntxt_pd->id;
 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1129,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 	}
 
 	kfree(cq);
-	return status;
+	return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1228,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
 	int status = 0;
 	u64 usr_db;
 	struct ocrdma_create_qp_uresp uresp;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
 	memset(&uresp, 0, sizeof(uresp));
 	usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1368,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 		status = -ENOMEM;
 		goto gen_err;
 	}
-	qp->dev = dev;
 	ocrdma_set_qp_init_params(qp, pd, attrs);
 	if (udata == NULL)
 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1426,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum ib_qp_state old_qps;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 	if (attr_mask & IB_QP_STATE)
 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
 	/* if new and previous states are same hw doesn't need to
@@ -1335,7 +1449,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum ib_qp_state old_qps, new_qps;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 
 	/* syncronize with multiple context trying to change, retrive qps */
 	mutex_lock(&dev->dev_lock);
@@ -1402,7 +1516,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 	u32 qp_state;
 	struct ocrdma_qp_params params;
 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
 	memset(&params, 0, sizeof(params));
 	mutex_lock(&dev->dev_lock);
@@ -1412,8 +1526,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 		goto mbx_err;
 	if (qp->qp_type == IB_QPT_UD)
 		qp_attr->qkey = params.qkey;
-	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
-	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
 	qp_attr->path_mtu =
 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
 				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1580,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
+	qp_attr->qp_state = get_ibqp_state(qp_state);
+	qp_attr->cur_qp_state = qp_attr->qp_state;
 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
 	qp_attr->max_dest_rd_atomic =
 		params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1589,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 		params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+	/* Sync driver QP state with FW */
+	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
 mbx_err:
 	return status;
 }
 
-static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
 {
-	int i = idx / 32;
-	unsigned int mask = (1 << (idx % 32));
+	unsigned int i = idx / 32;
+	u32 mask = (1U << (idx % 32));
 
-	if (srq->idx_bit_fields[i] & mask)
-		srq->idx_bit_fields[i] &= ~mask;
-	else
-		srq->idx_bit_fields[i] |= mask;
+	srq->idx_bit_fields[i] ^= mask;
 }
 
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1709,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
 	int found = false;
 	unsigned long flags;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	/* sync with any active CQ poll */
 
 	spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1726,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-	int status;
 	struct ocrdma_pd *pd;
 	struct ocrdma_qp *qp;
 	struct ocrdma_dev *dev;
@@ -1622,7 +1734,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	unsigned long flags;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 
 	attrs.qp_state = IB_QPS_ERR;
 	pd = qp->pd;
@@ -1635,7 +1747,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	 * discarded until the old CQEs are discarded.
 	 */
 	mutex_lock(&dev->dev_lock);
-	status = ocrdma_mbx_destroy_qp(dev, qp);
+	(void) ocrdma_mbx_destroy_qp(dev, qp);
 
 	/*
 	 * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1782,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
 	kfree(qp);
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1943,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
 	else
 		ud_hdr->qkey = wr->wr.ud.remote_qkey;
 	ud_hdr->rsvd_ahid = ah->id;
+	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
 
 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2121,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
 	u64 fbo;
 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
 	struct ocrdma_mr *mr;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-	if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+	if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
 		return -EINVAL;
 
 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2154,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
 	fast_reg->size_sge =
 		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
 	mr = (struct ocrdma_mr *) (unsigned long)
-		qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+		dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
 	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
 	return 0;
 }
@@ -2112,8 +2227,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
 		status = ocrdma_build_write(qp, hdr, wr);
 		break;
-	case IB_WR_RDMA_READ_WITH_INV:
-		hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
 	case IB_WR_RDMA_READ:
 		ocrdma_build_read(qp, hdr, wr);
 		break;
@@ -2484,8 +2597,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
 				bool *polled, bool *stop)
 {
 	bool expand;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+	if (status < OCRDMA_MAX_CQE_ERR)
+		atomic_inc(&dev->cqe_err_stats[status]);
 
 	/* when hw sq is empty, but rq is not empty, so we continue
 	 * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2720,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
 				 int status)
 {
 	bool expand;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+	if (status < OCRDMA_MAX_CQE_ERR)
+		atomic_inc(&dev->cqe_err_stats[status]);
 
 	/* when hw_rq is empty, but wq is not empty, so continue
 	 * to keep the cqe to get the cq event again.