Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	81
1 file changed, 52 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cb76eb5eee1f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -212,13 +212,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
 	wq->db = rdev->lldi.db_reg;
 	wq->gts = rdev->lldi.gts_reg;
-	if (user) {
-		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-					(wq->sq.qid << rdev->qpshift);
-		wq->sq.udb &= PAGE_MASK;
-		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-					(wq->rq.qid << rdev->qpshift);
-		wq->rq.udb &= PAGE_MASK;
+	if (user || is_t5(rdev->lldi.adapter_type)) {
+		u32 off;
+
+		off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK;
+		if (user) {
+			wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
+		} else {
+			off += 128 * (wq->sq.qid & rdev->qpmask) + 8;
+			wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
+		}
+		off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK;
+		if (user) {
+			wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
+		} else {
+			off += 128 * (wq->rq.qid & rdev->qpmask) + 8;
+			wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
+		}
 	}
 	wq->rdev = rdev;
 	wq->rq.msn = 1;
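
The hunk above replaces the old pci_resource_start() arithmetic with precomputed BAR2 addresses: user QPs get the bus address (bar2_pa) of their doorbell page for mmap(), while kernel QPs on T5 index into the ioremapped mapping (bar2_kva), where doorbells sit at a 128-byte stride with the write target 8 bytes into the slot. A self-contained C model of that address math follows; the PAGE_SHIFT value, helper name, and example numbers are illustrative, not taken from the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define SK_PAGE_SHIFT 12                    /* assumed 4K doorbell pages */
	#define SK_PAGE_MASK  (~((1ULL << SK_PAGE_SHIFT) - 1))

	/* Page-aligned offset of a queue's doorbell page within BAR2. */
	static uint64_t bar2_page_off(uint32_t qid, uint32_t qpshift)
	{
		return ((uint64_t)qid << qpshift) & SK_PAGE_MASK;
	}

	int main(void)
	{
		uint64_t bar2_pa = 0xd0000000ULL;   /* example BAR2 bus address */
		uint32_t qid = 1026, qpshift = 12, qpmask = 15;

		/* user QP: physical page address, handed out for mmap() */
		uint64_t user_udb = bar2_pa + bar2_page_off(qid, qpshift);

		/* kernel QP: same page, plus the per-queue 128-byte slot + 8 */
		uint64_t kern_off = bar2_page_off(qid, qpshift)
				  + 128 * (qid & qpmask) + 8;

		printf("user udb 0x%llx, kernel BAR2 offset 0x%llx\n",
		       (unsigned long long)user_udb,
		       (unsigned long long)kern_off);
		return 0;
	}
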
@@ -299,9 +309,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	if (ret)
 		goto free_dma;
 
-	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
+	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n",
 	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
-	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+	     (__force unsigned long) wq->sq.udb,
+	     (__force unsigned long) wq->rq.udb);
 
 	return 0;
 free_dma:
@@ -425,6 +436,8 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 	default:
 		return -EINVAL;
 	}
+	wqe->send.r3 = 0;
+	wqe->send.r4 = 0;
 
 	plen = 0;
 	if (wr->num_sge) {
@@ -555,7 +568,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
 	int rem;
 
-	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+	if (wr->wr.fast_reg.page_list_len >
+	    t4_max_fr_depth(use_dsgl))
 		return -EINVAL;
 
 	wqe->fr.qpbinde_to_dcacpu = 0;
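
t4_max_fr_depth() is defined in t4.h and is not shown in this diff; the point of the change above is that the fast-register page-list limit is no longer the single T4_MAX_FR_DEPTH constant but depends on whether the PBL is written via DSGL. A plausible sketch, where both the constant names and their values are assumptions:

	/* Hypothetical shape of the t4.h helper; the depth limit differs
	 * when the PBL goes out as a DSGL versus inline in the WR. */
	#define T4_MAX_FR_IMMD_DEPTH 255   /* assumed: PBL inline in the WR */
	#define T4_MAX_FR_DSGL_DEPTH 1024  /* assumed: PBL written via DSGL */

	static inline int t4_max_fr_depth(int use_dsgl)
	{
		return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
	}
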
@@ -650,9 +664,10 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
 
 	spin_lock_irqsave(&qhp->rhp->lock, flags);
 	spin_lock(&qhp->lock);
-	if (qhp->rhp->db_state == NORMAL) {
-		t4_ring_sq_db(&qhp->wq, inc);
-	} else {
+	if (qhp->rhp->db_state == NORMAL)
+		t4_ring_sq_db(&qhp->wq, inc,
+			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+	else {
 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
 		qhp->wq.sq.wq_pidx_inc += inc;
 	}
@@ -667,9 +682,10 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 
 	spin_lock_irqsave(&qhp->rhp->lock, flags);
 	spin_lock(&qhp->lock);
-	if (qhp->rhp->db_state == NORMAL) {
-		t4_ring_rq_db(&qhp->wq, inc);
-	} else {
+	if (qhp->rhp->db_state == NORMAL)
+		t4_ring_rq_db(&qhp->wq, inc,
+			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+	else {
 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
 		qhp->wq.rq.wq_pidx_inc += inc;
 	}
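
Both flow-control ring helpers above now pass the chip type and a NULL WQE to t4_ring_sq_db()/t4_ring_rq_db(), while c4iw_post_send() and c4iw_post_receive() below pass the WQE they just built. The bodies live in t4.h, outside this diff; here is a runnable model of the path selection the new arguments plausibly encode (the inc == 1 condition gating the write-combined fast path is an assumption):

	#include <stdio.h>

	enum db_path {
		DB_T4_KDB,	/* T4: legacy kernel doorbell, qid | pidx */
		DB_T5_WC,	/* T5: push the whole WQE through the BAR2/WC window */
		DB_T5_DB	/* T5: index-increment write to the BAR2 doorbell */
	};

	static enum db_path pick_db_path(int t5, unsigned int inc, const void *wqe)
	{
		if (!t5)
			return DB_T4_KDB;
		if (inc == 1 && wqe)	/* assumed: single WQE supplied -> WC copy */
			return DB_T5_WC;
		return DB_T5_DB;
	}

	int main(void)
	{
		int wqe = 0;

		/* ring_kernel_*_db() pass NULL; post_send passes the built WQE */
		printf("T4: %d, T5+wqe: %d, T5 batch: %d\n",
		       pick_db_path(0, 1, &wqe),
		       pick_db_path(1, 1, &wqe),
		       pick_db_path(1, 3, NULL));
		return 0;
	}
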
@@ -686,7 +702,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	enum fw_wr_opcodes fw_opcode = 0;
 	enum fw_ri_wr_flags fw_flags;
 	struct c4iw_qp *qhp;
-	union t4_wr *wqe;
+	union t4_wr *wqe = NULL;
 	u32 num_wrs;
 	struct t4_swsqe *swsqe;
 	unsigned long flag;
@@ -792,7 +808,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
 	if (!qhp->rhp->rdev.status_page->db_off) {
-		t4_ring_sq_db(&qhp->wq, idx);
+		t4_ring_sq_db(&qhp->wq, idx,
+			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
 		spin_unlock_irqrestore(&qhp->lock, flag);
 	} else {
 		spin_unlock_irqrestore(&qhp->lock, flag);
@@ -806,7 +823,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 {
 	int err = 0;
 	struct c4iw_qp *qhp;
-	union t4_recv_wr *wqe;
+	union t4_recv_wr *wqe = NULL;
 	u32 num_wrs;
 	u8 len16 = 0;
 	unsigned long flag;
@@ -858,7 +875,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		num_wrs--;
 	}
 	if (!qhp->rhp->rdev.status_page->db_off) {
-		t4_ring_rq_db(&qhp->wq, idx);
+		t4_ring_rq_db(&qhp->wq, idx,
+			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
 		spin_unlock_irqrestore(&qhp->lock, flag);
 	} else {
 		spin_unlock_irqrestore(&qhp->lock, flag);
@@ -1352,6 +1370,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		switch (attrs->next_state) {
 		case C4IW_QP_STATE_CLOSING:
 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+			t4_set_wq_in_error(&qhp->wq);
 			set_state(qhp, C4IW_QP_STATE_CLOSING);
 			ep = qhp->ep;
 			if (!internal) {
@@ -1359,30 +1378,30 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 				disconnect = 1;
 				c4iw_get_ep(&qhp->ep->com);
 			}
-			t4_set_wq_in_error(&qhp->wq);
 			ret = rdma_fini(rhp, qhp, ep);
 			if (ret)
 				goto err;
 			break;
 		case C4IW_QP_STATE_TERMINATE:
+			t4_set_wq_in_error(&qhp->wq);
 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
-			t4_set_wq_in_error(&qhp->wq);
 			ep = qhp->ep;
-			disconnect = 1;
-			if (!internal)
+			if (!internal) {
+				c4iw_get_ep(&qhp->ep->com);
 				terminate = 1;
-			else {
+				disconnect = 1;
+			} else {
+				terminate = qhp->attr.send_term;
 				ret = rdma_fini(rhp, qhp, ep);
 				if (ret)
 					goto err;
 			}
-			c4iw_get_ep(&qhp->ep->com);
 			break;
 		case C4IW_QP_STATE_ERROR:
-			set_state(qhp, C4IW_QP_STATE_ERROR);
 			t4_set_wq_in_error(&qhp->wq);
+			set_state(qhp, C4IW_QP_STATE_ERROR);
 			if (!internal) {
 				abort = 1;
 				disconnect = 1;
@@ -1677,11 +1696,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
 		insert_mmap(ucontext, mm2);
 		mm3->key = uresp.sq_db_gts_key;
-		mm3->addr = qhp->wq.sq.udb;
+		mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
 		mm3->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm3);
 		mm4->key = uresp.rq_db_gts_key;
-		mm4->addr = qhp->wq.rq.udb;
+		mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
 		mm4->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm4);
 		if (mm5) {
@@ -1758,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	/*
 	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
 	 * ringing the queue db when we're in DB_FULL mode.
+	 * Only allow this on T4 devices.
 	 */
 	attrs.sq_db_inc = attr->sq_psn;
 	attrs.rq_db_inc = attr->rq_psn;
 	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
 	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+		return -EINVAL;
 
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }