author    Mike Marciniszyn <mike.marciniszyn@intel.com>    2012-07-19 09:03:56 -0400
committer Roland Dreier <roland@purestorage.com>           2012-07-19 14:19:58 -0400
commit    551ace124d0ef471e8a5fee3ef9e5bb7460251be
tree      d04b3700a959bc1d87f27ba4cc0752b6dd1725b9 /drivers/infiniband/hw/qib
parent    f3331f88a4b97530b7acd3112902524d9dc0688c
IB/qib: Reduce sdma_lock contention
Profiling has shown that sdma_lock is proving to be a performance bottleneck.
The situations include:
- RDMA reads when krcvqs > 1
- post sends from multiple threads
For RDMA reads, the current global qib_wq mechanism runs on all CPUs
and contends for the sdma_lock when multiple RDMA read requests are
fielded on different CPUs. For post sends, the direct call to
qib_do_send() from multiple threads causes the contention.
Since the sdma mechanism is per port, this fix converts the existing
workqueue to a per-port single-threaded workqueue to reduce the lock
contention in the RDMA read case, and in any other case where the QP
is scheduled via the workqueue mechanism from more than one CPU.
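
As a generic illustration of that pattern (not part of the patch; the
example_* names are hypothetical stand-ins, the driver's real code is
qib_create_workqueues() in the qib_init.c hunks below), a per-port
single-threaded workqueue can be set up and torn down like this:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* hypothetical stand-in for the driver's per-port structure (qib_pportdata) */
struct example_port {
	struct workqueue_struct *wq;	/* one single-threaded workqueue per port */
	struct work_struct send_work;	/* deferred "kick the send engine" work */
};

static void example_send_handler(struct work_struct *work)
{
	struct example_port *port =
		container_of(work, struct example_port, send_work);

	/*
	 * Drive the port's send engine here.  Because the workqueue has a
	 * single worker thread, two CPUs can queue work for the same port
	 * without their handlers ever running (and locking) concurrently.
	 */
	(void)port;
}

static int example_port_wq_init(struct example_port *port)
{
	INIT_WORK(&port->send_work, example_send_handler);
	port->wq = create_singlethread_workqueue("example_wq");
	return port->wq ? 0 : -ENOMEM;
}

static void example_port_wq_teardown(struct example_port *port)
{
	if (port->wq) {
		destroy_workqueue(port->wq);	/* drains queued work, then frees it */
		port->wq = NULL;
	}
}
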
For the post send case, this patch modifies the post send code to test
for a non-empty sdma engine. If the sdma engine is not idle, the (now
single-threaded) workqueue is used to trigger the send engine instead
of the direct call to qib_do_send().
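
A condensed sketch of that decision (again with hypothetical example_*
names; the real change is the qib_sdma_empty() test and the
qib_schedule_send() call in the qib_verbs.c hunks below):

#include <linux/types.h>
#include <linux/workqueue.h>

/* hypothetical per-port state, reduced to what the decision needs */
struct example_port {
	struct workqueue_struct *wq;	/* the port's single-threaded workqueue */
	struct work_struct send_work;	/* deferred send handler */
	u64 descq_added;		/* sdma descriptors handed to the engine */
	u64 descq_removed;		/* sdma descriptors completed by the engine */
};

/* true when no sdma descriptors are in flight for this port */
static inline bool example_sdma_empty(const struct example_port *port)
{
	return port->descq_added == port->descq_removed;
}

static void example_do_send(struct work_struct *work)
{
	/* drive the send engine directly; body omitted */
}

/* called after a work request has been queued on the QP */
static void example_trigger_send(struct example_port *port)
{
	if (example_sdma_empty(port))
		example_do_send(&port->send_work);	/* engine idle: send in the caller's context */
	else
		queue_work(port->wq, &port->send_work);	/* engine busy: defer to the port's workqueue */
}
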
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--  drivers/infiniband/hw/qib/qib.h        | 10
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c   | 51
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  | 29
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h  |  7
4 files changed, 85 insertions, 12 deletions
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7e62f4137148..cbe577151457 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1,8 +1,8 @@
 #ifndef _QIB_KERNEL_H
 #define _QIB_KERNEL_H
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -544,6 +544,7 @@ struct qib_pportdata {
 
 	/* read mostly */
 	struct qib_sdma_desc *sdma_descq;
+	struct workqueue_struct *qib_wq;
 	struct qib_sdma_state sdma_state;
 	dma_addr_t sdma_descq_phys;
 	volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
@@ -1267,6 +1268,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
 /* ppd->sdma_lock should be locked before calling this. */
 int qib_sdma_make_progress(struct qib_pportdata *dd);
 
+static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
+{
+	return ppd->sdma_descq_added == ppd->sdma_descq_removed;
+}
+
 /* must be called under qib_sdma_lock */
 static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
 {
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index dc14e100a7f1..306e65e99e99 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -210,6 +210,8 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 	init_timer(&ppd->symerr_clear_timer);
 	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
 	ppd->symerr_clear_timer.data = (unsigned long)ppd;
+
+	ppd->qib_wq = NULL;
 }
 
 static int init_pioavailregs(struct qib_devdata *dd)
@@ -483,6 +485,42 @@ static void init_piobuf_state(struct qib_devdata *dd)
 }
 
 /**
+ * qib_create_workqueues - create per port workqueues
+ * @dd: the qlogic_ib device
+ */
+static int qib_create_workqueues(struct qib_devdata *dd)
+{
+	int pidx;
+	struct qib_pportdata *ppd;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (!ppd->qib_wq) {
+			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
+				 dd->unit, pidx);
+			ppd->qib_wq =
+				create_singlethread_workqueue(wq_name);
+			if (!ppd->qib_wq)
+				goto wq_error;
+		}
+	}
+	return 0;
+wq_error:
+	pr_err(
+		QIB_DRV_NAME ": create_singlethread_workqueue failed for port %d\n",
+		pidx + 1);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (ppd->qib_wq) {
+			destroy_workqueue(ppd->qib_wq);
+			ppd->qib_wq = NULL;
+		}
+	}
+	return -ENOMEM;
+}
+
+/**
  * qib_init - do the actual initialization sequence on the chip
  * @dd: the qlogic_ib device
  * @reinit: reinitializing, so don't allocate new memory
@@ -764,6 +802,11 @@ static void qib_shutdown_device(struct qib_devdata *dd)
 		 * We can't count on interrupts since we are stopping.
 		 */
 		dd->f_quiet_serdes(ppd);
+
+		if (ppd->qib_wq) {
+			destroy_workqueue(ppd->qib_wq);
+			ppd->qib_wq = NULL;
+		}
 	}
 
 	qib_update_eeprom_log(dd);
@@ -1249,6 +1292,10 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 	if (ret)
 		goto bail; /* error already printed */
 
+	ret = qib_create_workqueues(dd);
+	if (ret)
+		goto bail;
+
 	/* do the generic initialization */
 	initfail = qib_init(dd, 0);
 
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 03ace0650a8f..fc9b205c2412 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
+	int *scheduled)
 {
 	struct qib_swqe *wqe;
 	u32 next;
@@ -440,6 +441,12 @@ bail_inval_free:
 bail_inval:
 	ret = -EINVAL;
 bail:
+	if (!ret && !wr->next &&
+	    !qib_sdma_empty(
+	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
+		qib_schedule_send(qp);
+		*scheduled = 1;
+	}
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return ret;
 }
@@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct qib_qp *qp = to_iqp(ibqp);
 	int err = 0;
+	int scheduled = 0;
 
 	for (; wr; wr = wr->next) {
-		err = qib_post_one_send(qp, wr);
+		err = qib_post_one_send(qp, wr, &scheduled);
 		if (err) {
 			*bad_wr = wr;
 			goto bail;
@@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 
 	/* Try to do the send work in the caller's context. */
-	qib_do_send(&qp->s_work);
+	if (!scheduled)
+		qib_do_send(&qp->s_work);
 
 bail:
 	return err;
@@ -2308,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
 			get_order(lk_tab_size));
 	kfree(dev->qp_table);
 }
+
+/*
+ * This must be called with s_lock held.
+ */
+void qib_schedule_send(struct qib_qp *qp)
+{
+	if (qib_send_ok(qp)) {
+		struct qib_ibport *ibp =
+			to_iport(qp->ibqp.device, qp->port_num);
+		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+		queue_work(ppd->qib_wq, &qp->s_work);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 61fad05328ca..aff8b2c17886 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -727,6 +727,7 @@ struct qib_ibport {
 	struct qib_opcode_stats opstats[128];
 };
 
+
 struct qib_ibdev {
 	struct ib_device ibdev;
 	struct list_head pending_mmaps;
@@ -836,11 +837,7 @@ extern struct workqueue_struct *qib_cq_wq;
 /*
  * This must be called with s_lock held.
  */
-static inline void qib_schedule_send(struct qib_qp *qp)
-{
-	if (qib_send_ok(qp))
-		queue_work(ib_wq, &qp->s_work);
-}
+void qib_schedule_send(struct qib_qp *qp);
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
 {