Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 54
1 files changed, 40 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b6721ac9c..2e8f6f36e0a5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
         MTHCA_QP_BIT_RSC = 1 << 3
 };
 
+enum {
+        MTHCA_SEND_DOORBELL_FENCE = 1 << 5
+};
+
 struct mthca_qp_path {
         __be32 port_pkey;
         u8 rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
         return 0;
 }
 
+static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+        if (send_cq == recv_cq)
+                spin_lock_irq(&send_cq->lock);
+        else if (send_cq->cqn < recv_cq->cqn) {
+                spin_lock_irq(&send_cq->lock);
+                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+        } else {
+                spin_lock_irq(&recv_cq->lock);
+                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+        }
+}
+
+static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+        if (send_cq == recv_cq)
+                spin_unlock_irq(&send_cq->lock);
+        else if (send_cq->cqn < recv_cq->cqn) {
+                spin_unlock(&recv_cq->lock);
+                spin_unlock_irq(&send_cq->lock);
+        } else {
+                spin_unlock(&send_cq->lock);
+                spin_unlock_irq(&recv_cq->lock);
+        }
+}
+
 int mthca_alloc_sqp(struct mthca_dev *dev,
                     struct mthca_pd *pd,
                     struct mthca_cq *send_cq,
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
          * Lock CQs here, so that CQ polling code can do QP lookup
          * without taking a lock.
          */
-        spin_lock_irq(&send_cq->lock);
-        if (send_cq != recv_cq)
-                spin_lock(&recv_cq->lock);
+        mthca_lock_cqs(send_cq, recv_cq);
 
         spin_lock(&dev->qp_table.lock);
         mthca_array_clear(&dev->qp_table.qp, mqpn);
         spin_unlock(&dev->qp_table.lock);
 
-        if (send_cq != recv_cq)
-                spin_unlock(&recv_cq->lock);
-        spin_unlock_irq(&send_cq->lock);
+        mthca_unlock_cqs(send_cq, recv_cq);
 
 err_out:
         dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
          * Lock CQs here, so that CQ polling code can do QP lookup
          * without taking a lock.
          */
-        spin_lock_irq(&send_cq->lock);
-        if (send_cq != recv_cq)
-                spin_lock(&recv_cq->lock);
+        mthca_lock_cqs(send_cq, recv_cq);
 
         spin_lock(&dev->qp_table.lock);
         mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
         --qp->refcount;
         spin_unlock(&dev->qp_table.lock);
 
-        if (send_cq != recv_cq)
-                spin_unlock(&recv_cq->lock);
-        spin_unlock_irq(&send_cq->lock);
+        mthca_unlock_cqs(send_cq, recv_cq);
 
         wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         int i;
         int size;
         int size0 = 0;
-        u32 f0 = 0;
+        u32 f0;
         int ind;
         u8 op0 = 0;
 
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 if (!size0) {
                         size0 = size;
                         op0 = mthca_opcode[wr->opcode];
+                        f0 = wr->send_flags & IB_SEND_FENCE ?
+                                MTHCA_SEND_DOORBELL_FENCE : 0;
                 }
 
                 ++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         int i;
         int size;
         int size0 = 0;
-        u32 f0 = 0;
+        u32 f0;
         int ind;
         u8 op0 = 0;
 
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 if (!size0) {
                         size0 = size;
                         op0 = mthca_opcode[wr->opcode];
+                        f0 = wr->send_flags & IB_SEND_FENCE ?
+                                MTHCA_SEND_DOORBELL_FENCE : 0;
                 }
 
                 ++ind;