Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r--	drivers/infiniband/hw/qedr/main.c	23
-rw-r--r--	drivers/infiniband/hw/qedr/qedr.h	8
-rw-r--r--	drivers/infiniband/hw/qedr/qedr_cm.c	14
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	62
4 files changed, 65 insertions, 42 deletions
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
 	return 0;
 }
 
-void qedr_unaffiliated_event(void *context,
-			     u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
 	pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
 		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
 			goto sysfs_err;
 
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
 	return dev;
 
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
 	ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-	return 0;
+	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
 	qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
 	union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
 	if (rc)
 		DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
 	switch (event) {
 	case QEDE_UP:
-		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		qedr_open(dev);
 		break;
 	case QEDE_DOWN:
 		qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
 	struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT	(0)
+
 struct qedr_dev {
 	struct ib_device	ibdev;
 	struct qed_dev		*cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
 	struct qedr_cq		*gsi_sqcq;
 	struct qedr_cq		*gsi_rqcq;
 	struct qedr_qp		*gsi_qp;
+
+	unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL			(0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)
 
 #define QEDR_MAX_PORT			(1)
+#define QEDR_PORT			(1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
@@ -251,9 +256,6 @@ struct qedr_cq {
 
 	u16 icid;
 
-	/* Lock to protect completion handler */
-	spinlock_t comp_handler_lock;
-
 	/* Lock to protect multiplem CQ's */
 	spinlock_t cq_lock;
 	u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
 	qedr_inc_sw_gsi_cons(&qp->sq);
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }
 
 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }
 
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
 	}
 
 	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-	else
 		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
 	packet->roce_mode = roce_mode;
 	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 			     struct ib_ucontext *context, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
-	struct qedr_ucontext *uctx = NULL;
-	struct qedr_alloc_pd_uresp uresp;
 	struct qedr_pd *pd;
 	u16 pd_id;
 	int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	if (rc)
+		goto err;
 
-	uresp.pd_id = pd_id;
 	pd->pd_id = pd_id;
 
 	if (udata && context) {
+		struct qedr_alloc_pd_uresp uresp;
+
+		uresp.pd_id = pd_id;
+
 		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-		if (rc)
+		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-		uctx = get_qedr_ucontext(context);
-		uctx->pd = pd;
-		pd->uctx = uctx;
+			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+			goto err;
+		}
+
+		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx->pd = pd;
 	}
 
 	return &pd->ibpd;
+
+err:
+	kfree(pd);
+	return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
 	return ERR_PTR(-EFAULT);
 }
 
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
 	switch (qp_state) {
 	case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 	return IB_QPS_ERR;
 }
 
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+					enum ib_qp_state qp_state)
 {
 	switch (qp_state) {
 	case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 	int status = 0;
 
 	if (new_state == qp->state)
-		return 1;
+		return 0;
 
 	switch (qp->state) {
 	case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 		/* ERR->XXX */
 		switch (new_state) {
 		case QED_ROCE_QP_STATE_RESET:
+			if ((qp->rq.prod != qp->rq.cons) ||
+			    (qp->sq.prod != qp->sq.cons)) {
+				DP_NOTICE(dev,
+					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
+					  qp->sq.cons);
+				status = -EINVAL;
+			}
 			break;
 		default:
 			status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
 	DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
 		 qp_params.remote_mac_addr);
-;
 
 	qp_params.mtu = qp->mtu;
 	qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 
 	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
 	qp_attr->rq_psn = params.rq_psn;
 	qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 	qp_init_attr->cap = qp_attr->cap;
 
 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
 	return rc;
 }
 
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+				       int max_page_list_len)
 {
 	struct qedr_pd *pd = get_qedr_pd(ibpd);
 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
 	return 0;
 }
 
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 	}
 }
 
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
 	int wq_is_full, err_wr, pbl_is_full;
 	struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 	return true;
 }
 
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		     struct ib_send_wr **bad_wr)
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+			    struct ib_send_wr **bad_wr)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
 				  IB_WC_SUCCESS, 0);
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-		DP_ERR(dev,
-		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-		       cq->icid, qp->icid);
+		if (qp->state != QED_ROCE_QP_STATE_ERR)
+			DP_ERR(dev,
+			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+			       cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 1);
 		break;