path: root/drivers/infiniband/hw/cxgb4/qp.c
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 83a01dc0c4c1..0c28ed1eafa6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			err = build_rdma_write(wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_READ:
+		case IB_WR_RDMA_READ_WITH_INV:
 			fw_opcode = FW_RI_RDMA_READ_WR;
 			swsqe->opcode = FW_RI_READ_REQ;
-			fw_flags = 0;
+			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+			else
+				fw_flags = 0;
 			err = build_rdma_read(wqe, wr, &len16);
 			if (err)
 				break;
@@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			err = build_fastreg(wqe, wr, &len16);
 			break;
 		case IB_WR_LOCAL_INV:
+			if (wr->send_flags & IB_SEND_FENCE)
+				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
 			fw_opcode = FW_RI_INV_LSTAG_WR;
 			swsqe->opcode = FW_RI_LOCAL_INV;
 			err = build_inv_stag(wqe, wr, &len16);
@@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	wait_event(qhp->wait, !qhp->ep);
 
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	if (ret)
 		goto err2;
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
-	if (ret)
-		goto err3;
-
 	if (udata) {
 		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
 		if (!mm1) {
 			ret = -ENOMEM;
-			goto err4;
+			goto err3;
 		}
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2) {
 			ret = -ENOMEM;
-			goto err5;
+			goto err4;
 		}
 		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
 		if (!mm3) {
 			ret = -ENOMEM;
-			goto err6;
+			goto err5;
 		}
 		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
 		if (!mm4) {
 			ret = -ENOMEM;
-			goto err7;
+			goto err6;
 		}
 
 		uresp.qid_mask = rhp->rdev.qpmask;
@@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		spin_unlock(&ucontext->mmap_lock);
 		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
 		if (ret)
-			goto err8;
+			goto err7;
 		mm1->key = uresp.sq_key;
 		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
 		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.sq.qid);
 	return &qhp->ibqp;
-err8:
-	kfree(mm4);
 err7:
-	kfree(mm3);
+	kfree(mm4);
 err6:
-	kfree(mm2);
+	kfree(mm3);
 err5:
-	kfree(mm1);
+	kfree(mm2);
 err4:
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+	kfree(mm1);
 err3:
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 err2:
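
For illustration only (not part of the patch): a minimal sketch of how a kernel ULP might exercise the two work-request types the first two hunks now handle, assuming the pre-4.4 struct ib_send_wr layout (rdma union in wr.wr.rdma, ex.invalidate_rkey). The function name post_read_then_inv() and all buffer/key parameters are hypothetical.

/*
 * Hypothetical caller sketch: post an RDMA READ using the
 * IB_WR_RDMA_READ_WITH_INV opcode, for which c4iw_post_send() now sets
 * FW_RI_RDMA_READ_INVALIDATE, then a fenced LOCAL_INV, for which it now
 * ORs in FW_RI_LOCAL_FENCE_FLAG.  All values are placeholders.
 */
static int post_read_then_inv(struct ib_qp *qp, struct ib_sge *sink_sge,
			      u64 remote_addr, u32 remote_rkey, u32 local_stag)
{
	struct ib_send_wr read_wr, inv_wr, *bad_wr;
	int ret;

	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
	read_wr.sg_list = sink_sge;
	read_wr.num_sge = 1;
	read_wr.wr.rdma.remote_addr = remote_addr;
	read_wr.wr.rdma.rkey = remote_rkey;
	read_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(qp, &read_wr, &bad_wr);
	if (ret)
		return ret;

	memset(&inv_wr, 0, sizeof(inv_wr));
	inv_wr.opcode = IB_WR_LOCAL_INV;
	inv_wr.ex.invalidate_rkey = local_stag;
	/* IB_SEND_FENCE keeps the invalidate from passing the prior read. */
	inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;

	return ib_post_send(qp, &inv_wr, &bad_wr);
}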