Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_qp.c    66
1 file changed, 40 insertions, 26 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e36732..07c13be07a4 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	spin_lock(&dev->qp_table.lock);
 	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
 	if (qp)
-		atomic_inc(&qp->refcount);
+		++qp->refcount;
 	spin_unlock(&dev->qp_table.lock);

 	if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	if (qp->ibqp.event_handler)
 		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

-	if (atomic_dec_and_test(&qp->refcount))
+	spin_lock(&dev->qp_table.lock);
+	if (!--qp->refcount)
 		wake_up(&qp->wait);
+	spin_unlock(&dev->qp_table.lock);
 }

 static int to_mthca_state(enum ib_qp_state ib_state)
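As background for the two hunks above, here is a minimal sketch (not the driver's code) of the locking pattern they switch to: the QP reference count becomes a plain int that is only touched while the table spinlock is held, so the array lookup and the increment form one critical section, and the final decrement plus wake_up cannot race with a concurrent lookup. The demo_* names below are illustrative stand-ins, not mthca symbols; assume the count starts at 1 and the waitqueue is initialized at creation, as in mthca_alloc_qp_common.

/* Illustrative sketch only: demo_* names are not mthca symbols. */
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_qp {
	int			refcount;	/* protected by demo_table.lock */
	wait_queue_head_t	wait;
};

struct demo_table {
	spinlock_t		lock;
};

static void demo_get(struct demo_table *tbl, struct demo_qp *qp)
{
	spin_lock(&tbl->lock);
	++qp->refcount;			/* taken under the same lock as the lookup */
	spin_unlock(&tbl->lock);
}

static void demo_put(struct demo_table *tbl, struct demo_qp *qp)
{
	spin_lock(&tbl->lock);
	if (!--qp->refcount)		/* last reference: wake the destroyer */
		wake_up(&qp->wait);
	spin_unlock(&tbl->lock);
}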
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

 		mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	int ret;
 	int i;

-	atomic_set(&qp->refcount, 1);
+	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
 	qp->state = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	return err;
 }

+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+	int c;
+
+	spin_lock_irq(&dev->qp_table.lock);
+	c = qp->refcount;
+	spin_unlock_irq(&dev->qp_table.lock);
+
+	return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
 		   struct mthca_qp *qp)
 {
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
+	--qp->refcount;
 	spin_unlock(&dev->qp_table.lock);

 	if (send_cq != recv_cq)
 		spin_unlock(&recv_cq->lock);
 	spin_unlock_irq(&send_cq->lock);

-	atomic_dec(&qp->refcount);
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
+	wait_event(qp->wait, !get_qp_refcount(dev, qp));

 	if (qp->state != IB_QPS_RESET)
 		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
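The teardown side, sketched under the same demo_* assumptions as above: the destroyer drops the reference taken at creation under the lock, then sleeps until any event handlers still holding references have dropped theirs. Reading the count through a locked helper, mirroring get_qp_refcount in the hunks above, keeps the condition that wait_event re-evaluates consistent with the locked decrements.

/* Continues the demo_* sketch above; still illustrative only. */
static int demo_read_refcount(struct demo_table *tbl, struct demo_qp *qp)
{
	int c;

	spin_lock_irq(&tbl->lock);
	c = qp->refcount;		/* read under the lock that guards the writes */
	spin_unlock_irq(&tbl->lock);

	return c;
}

static void demo_destroy(struct demo_table *tbl, struct demo_qp *qp)
{
	spin_lock(&tbl->lock);
	--qp->refcount;			/* drop the reference taken at creation */
	spin_unlock(&tbl->lock);

	/* wait_event() re-checks the condition on every wake_up() */
	wait_event(qp->wait, !demo_read_refcount(tbl, qp));
}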
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

 		mthca_free_memfree(dev, qp);
@@ -1714,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,

 	ind = qp->rq.next_ind;

-	for (nreq = 0; wr; ++nreq, wr = wr->next) {
-		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-			nreq = 0;
-
-			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
-			doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
-			wmb();
-
-			mthca_write64(doorbell,
-				      dev->kar + MTHCA_RECEIVE_DOORBELL,
-				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-			size0 = 0;
-		}
-
+	for (nreq = 0; wr; wr = wr->next) {
 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
 			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
 				  " %d max, %d nreq)\n", qp->qpn,
@@ -1784,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		++ind;
 		if (unlikely(ind >= qp->rq.max))
 			ind -= qp->rq.max;
+
+		++nreq;
+		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+			doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+			wmb();
+
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_RECEIVE_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+			size0 = 0;
+		}
 	}

 out:
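The last two hunks move the per-batch doorbell ring from the top of the posting loop to the bottom, after each work queue entry has actually been written. A condensed sketch of the resulting ordering, with hypothetical build_wqe() and ring_rq_doorbell() helpers standing in for the WQE-building code and the mthca_write64() sequence shown in the diff:

/* Illustrative sketch only: build_wqe(), ring_rq_doorbell() and the
 * structures below are hypothetical stand-ins, not mthca functions. */
#define MAX_WQES_PER_DB	256		/* illustrative batch size */

struct recv_wr {
	struct recv_wr	*next;
};

static void build_wqe(struct recv_wr *wr);	/* hypothetical: writes one WQE */
static void ring_rq_doorbell(int count);	/* hypothetical: barrier + doorbell write */

static void post_loop(struct recv_wr *wr)
{
	int nreq = 0;

	for (; wr; wr = wr->next) {
		build_wqe(wr);		/* the WQE exists before it is counted */

		++nreq;
		if (nreq == MAX_WQES_PER_DB) {
			nreq = 0;
			ring_rq_doorbell(MAX_WQES_PER_DB);	/* hand a full batch to hardware */
		}
	}

	if (nreq)
		ring_rq_doorbell(nreq);	/* ring for any partial batch left over */
}

Counting a request only after its WQE is built means a full batch never advertises an entry the hardware could fetch half-written; any partial batch left when the loop ends is covered by the doorbell ring after the out: label, outside the hunks shown here.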