author    Mike Marciniszyn <mike.marciniszyn@intel.com>  2012-07-19 09:03:56 -0400
committer Roland Dreier <roland@purestorage.com>  2012-07-19 14:19:58 -0400
commit    551ace124d0ef471e8a5fee3ef9e5bb7460251be
tree      d04b3700a959bc1d87f27ba4cc0752b6dd1725b9
parent    f3331f88a4b97530b7acd3112902524d9dc0688c
IB/qib: Reduce sdma_lock contention
Profiling has shown that sdma_lock has become a performance bottleneck.
The contended situations include:

- RDMA reads when krcvqs > 1
- post sends from multiple threads

For RDMA reads, the current global qib_wq mechanism runs on all CPUs
and contends for the sdma_lock when multiple RDMA read requests are
fielded on different CPUs. For post sends, the direct call to
qib_do_send() from multiple threads causes the contention.

Since the sdma mechanism is per port, this fix converts the existing
workqueue to a per-port single-threaded workqueue, reducing the lock
contention in the RDMA read case and in any other case where the QP is
scheduled via the workqueue mechanism from more than one CPU.

For the post send case, this patch modifies the post send code to test
for a non-empty sdma engine. If the sdma engine is not idle, the (now
single-threaded) workqueue is used to trigger the send engine instead
of the direct call to qib_do_send().

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
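The per-port workqueue creation itself lands in the qib init code rather
than in qib_verbs.c, so it falls outside the diffstat below. As a rough
sketch of the conversion described above (the function name and its
placement are assumptions, not taken from this diff; ppd->qib_wq is the
per-port workqueue field that qib_schedule_send() queues onto):

    /* Sketch only: the real allocation lives in the qib init path,
     * not in this file. */
    static int qib_create_port_wq(struct qib_pportdata *ppd,
    				  int unit, int port)
    {
    	char wq_name[16];

    	/* A single worker thread per port serializes that port's sdma
    	 * work, so QPs scheduled from multiple CPUs no longer contend
    	 * on sdma_lock. */
    	snprintf(wq_name, sizeof(wq_name), "qib%d_%d", unit, port);
    	ppd->qib_wq = create_singlethread_workqueue(wq_name);
    	return ppd->qib_wq ? 0 : -ENOMEM;
    }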
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_verbs.c')
 drivers/infiniband/hw/qib/qib_verbs.c | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 03ace0650a8f..fc9b205c2412 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
+	int *scheduled)
 {
 	struct qib_swqe *wqe;
 	u32 next;
@@ -440,6 +441,12 @@ bail_inval_free:
 bail_inval:
 	ret = -EINVAL;
 bail:
+	if (!ret && !wr->next &&
+	    !qib_sdma_empty(
+	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
+		qib_schedule_send(qp);
+		*scheduled = 1;
+	}
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return ret;
 }
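The new bail-path test reaches the per-port data by pointer arithmetic:
IB port numbers are 1-based while dd->pport is a 0-based array. A
hypothetical helper (not part of this patch) spells out the indexing:

    /* Hypothetical readability helper, not in the patch: map a QP to
     * its per-port data. */
    static struct qib_pportdata *qp_to_ppd(struct qib_qp *qp)
    {
    	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);

    	return dd->pport + qp->port_num - 1;	/* ports count from 1 */
    }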
@@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct qib_qp *qp = to_iqp(ibqp);
 	int err = 0;
+	int scheduled = 0;
 
 	for (; wr; wr = wr->next) {
-		err = qib_post_one_send(qp, wr);
+		err = qib_post_one_send(qp, wr, &scheduled);
 		if (err) {
 			*bad_wr = wr;
 			goto bail;
@@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 
 	/* Try to do the send work in the caller's context. */
-	qib_do_send(&qp->s_work);
+	if (!scheduled)
+		qib_do_send(&qp->s_work);
 
 bail:
 	return err;
@@ -2308,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
 		      get_order(lk_tab_size));
 	kfree(dev->qp_table);
 }
+
+/*
+ * This must be called with s_lock held.
+ */
+void qib_schedule_send(struct qib_qp *qp)
+{
+	if (qib_send_ok(qp)) {
+		struct qib_ibport *ibp =
+			to_iport(qp->ibqp.device, qp->port_num);
+		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+		queue_work(ppd->qib_wq, &qp->s_work);
+	}
+}
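Since qib_schedule_send() documents that it must run with s_lock held, a
lockdep assertion at the top of the function (a suggestion here, not
something this patch adds) would let CONFIG_LOCKDEP kernels catch
callers that forget the lock:

    /* Not in the patch: verify the locking contract at runtime. */
    lockdep_assert_held(&qp->s_lock);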