aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/hfi1/qp.c
diff options
context:
space:
mode:
authorMike Marciniszyn <mike.marciniszyn@intel.com>2018-11-28 13:33:00 -0500
committerJason Gunthorpe <jgg@mellanox.com>2018-12-06 22:15:36 -0500
commit9aefcabe579bca06325ad9e577a36816f57386ff (patch)
tree5d51380942b4dd7b7aa7a2f8b200300f0eec5a28 /drivers/infiniband/hw/hfi1/qp.c
parent18912c4524385dd6532c682cb9d4f6aa39ba8d47 (diff)
IB/hfi1: Reduce lock contention on iowait_lock for sdma and pio
Commit 4e045572e2c2 ("IB/hfi1: Add unique txwait_lock for txreq events") laid the groundwork to support per-resource wait locking. This patch adds that with a lock unique to each sdma engine and pio sendcontext, and makes the necessary changes for verbs, PSM, and vnic to use the new locks. This is particularly beneficial for smaller messages that will exhaust resources at a faster rate. Fixes: 7724105686e7 ("IB/hfi1: add driver files") Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw/hfi1/qp.c')
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c20
1 file changed, 8 insertions, 12 deletions
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 6f3bc4dab858..e32fbfe029bc 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -368,20 +368,18 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
368 368
369static void qp_pio_drain(struct rvt_qp *qp) 369static void qp_pio_drain(struct rvt_qp *qp)
370{ 370{
371 struct hfi1_ibdev *dev;
372 struct hfi1_qp_priv *priv = qp->priv; 371 struct hfi1_qp_priv *priv = qp->priv;
373 372
374 if (!priv->s_sendcontext) 373 if (!priv->s_sendcontext)
375 return; 374 return;
376 dev = to_idev(qp->ibqp.device);
377 while (iowait_pio_pending(&priv->s_iowait)) { 375 while (iowait_pio_pending(&priv->s_iowait)) {
378 write_seqlock_irq(&dev->iowait_lock); 376 write_seqlock_irq(&priv->s_sendcontext->waitlock);
379 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1); 377 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
380 write_sequnlock_irq(&dev->iowait_lock); 378 write_sequnlock_irq(&priv->s_sendcontext->waitlock);
381 iowait_pio_drain(&priv->s_iowait); 379 iowait_pio_drain(&priv->s_iowait);
382 write_seqlock_irq(&dev->iowait_lock); 380 write_seqlock_irq(&priv->s_sendcontext->waitlock);
383 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0); 381 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
384 write_sequnlock_irq(&dev->iowait_lock); 382 write_sequnlock_irq(&priv->s_sendcontext->waitlock);
385 } 383 }
386} 384}
387 385
@@ -452,7 +450,6 @@ static int iowait_sleep(
452 struct hfi1_qp_priv *priv; 450 struct hfi1_qp_priv *priv;
453 unsigned long flags; 451 unsigned long flags;
454 int ret = 0; 452 int ret = 0;
455 struct hfi1_ibdev *dev;
456 453
457 qp = tx->qp; 454 qp = tx->qp;
458 priv = qp->priv; 455 priv = qp->priv;
@@ -465,9 +462,8 @@ static int iowait_sleep(
465 * buffer and undoing the side effects of the copy. 462 * buffer and undoing the side effects of the copy.
466 */ 463 */
467 /* Make a common routine? */ 464 /* Make a common routine? */
468 dev = &sde->dd->verbs_dev;
469 list_add_tail(&stx->list, &wait->tx_head); 465 list_add_tail(&stx->list, &wait->tx_head);
470 write_seqlock(&dev->iowait_lock); 466 write_seqlock(&sde->waitlock);
471 if (sdma_progress(sde, seq, stx)) 467 if (sdma_progress(sde, seq, stx))
472 goto eagain; 468 goto eagain;
473 if (list_empty(&priv->s_iowait.list)) { 469 if (list_empty(&priv->s_iowait.list)) {
@@ -478,11 +474,11 @@ static int iowait_sleep(
478 qp->s_flags |= RVT_S_WAIT_DMA_DESC; 474 qp->s_flags |= RVT_S_WAIT_DMA_DESC;
479 iowait_queue(pkts_sent, &priv->s_iowait, 475 iowait_queue(pkts_sent, &priv->s_iowait,
480 &sde->dmawait); 476 &sde->dmawait);
481 priv->s_iowait.lock = &dev->iowait_lock; 477 priv->s_iowait.lock = &sde->waitlock;
482 trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC); 478 trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
483 rvt_get_qp(qp); 479 rvt_get_qp(qp);
484 } 480 }
485 write_sequnlock(&dev->iowait_lock); 481 write_sequnlock(&sde->waitlock);
486 hfi1_qp_unbusy(qp, wait); 482 hfi1_qp_unbusy(qp, wait);
487 spin_unlock_irqrestore(&qp->s_lock, flags); 483 spin_unlock_irqrestore(&qp->s_lock, flags);
488 ret = -EBUSY; 484 ret = -EBUSY;
@@ -492,7 +488,7 @@ static int iowait_sleep(
492 } 488 }
493 return ret; 489 return ret;
494eagain: 490eagain:
495 write_sequnlock(&dev->iowait_lock); 491 write_sequnlock(&sde->waitlock);
496 spin_unlock_irqrestore(&qp->s_lock, flags); 492 spin_unlock_irqrestore(&qp->s_lock, flags);
497 list_del_init(&stx->list); 493 list_del_init(&stx->list);
498 return -EAGAIN; 494 return -EAGAIN;