| author | Zach Brown <zach.brown@oracle.com> | 2010-07-06 18:09:56 -0400 |
|---|---|---|
| committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:16:34 -0400 |
| commit | 515e079dab19cf774d1eec6e5f4ed65509e31ef1 (patch) | |
| tree | 1dcc0f211946c47cd19b6823574aaa1a32982ffa /net/rds | |
| parent | 8aeb1ba6630ffd44001ae9833842794df0107676 (diff) | |
RDS/IB: create a work queue for FMR flushing
This patch moves the FMR flushing work into its own multi-threaded work queue.
This is to maintain performance in preparation for returning the main krdsd
work queue back to a single-threaded work queue to avoid deep-rooted
concurrency bugs.
This is also good because it further separates FMRs, which might be removed
some day, from the rest of the code base.
Signed-off-by: Zach Brown <zach.brown@oracle.com>
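
For readers unfamiliar with the workqueue pattern the patch relies on, here is a minimal, self-contained sketch of the same lifecycle: a dedicated multi-threaded workqueue created at module init, delayed flush work queued onto it, and the queue destroyed at module exit. This is illustrative only; the names (example_wq, example_flush_worker, "example_flushd") are stand-ins and not part of the patch.

```c
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Illustrative only: a module-private workqueue dedicated to flush work,
 * mirroring the shape of the rds_ib_fmr_wq added by this patch. */
static struct workqueue_struct *example_wq;
static struct delayed_work example_flush_work;

static void example_flush_worker(struct work_struct *work)
{
	/* drop dirty state, unmap/unpin pages, etc. */
}

static int __init example_init(void)
{
	/* create_workqueue() (in this era) creates a multi-threaded,
	 * per-CPU workqueue, unlike create_singlethread_workqueue(). */
	example_wq = create_workqueue("example_flushd");
	if (!example_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&example_flush_work, example_flush_worker);
	return 0;
}

/* Callers request a flush; re-queueing already-pending delayed work is a
 * no-op, so bursts of requests coalesce into one run of the worker. */
static void example_request_flush(void)
{
	queue_delayed_work(example_wq, &example_flush_work, 10);
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_flush_work);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```
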
Diffstat (limited to 'net/rds')
-rw-r--r-- | net/rds/ib.c      |  9
-rw-r--r-- | net/rds/ib.h      |  2
-rw-r--r-- | net/rds/ib_rdma.c | 25
3 files changed, 33 insertions, 3 deletions
diff --git a/net/rds/ib.c b/net/rds/ib.c
index af1ef18b6ff0..d2007b931616 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -351,6 +351,7 @@ void rds_ib_exit(void)
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
+	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -386,10 +387,14 @@ int __init rds_ib_init(void)
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = ib_register_client(&rds_ib_client);
+	ret = rds_ib_fmr_init();
 	if (ret)
 		goto out;
 
+	ret = ib_register_client(&rds_ib_client);
+	if (ret)
+		goto out_fmr_exit;
+
 	ret = rds_ib_sysctl_init();
 	if (ret)
 		goto out_ibreg;
@@ -412,6 +417,8 @@ out_sysctl:
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
+out_fmr_exit:
+	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e9f9ddf440ca..fd4ea69d2443 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -308,6 +308,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int __init rds_ib_fmr_init(void);
+void __exit rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int __init rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 00f3995351c8..0eb597670c5b 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -691,6 +691,26 @@ out_nolock:
 	return ret;
 }
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int __init rds_ib_fmr_init(void)
+{
+	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+	if (!rds_ib_fmr_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void __exit rds_ib_fmr_exit(void)
+{
+	destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -718,7 +738,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_wq, &pool->flush_worker, 10);
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -726,7 +746,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_delayed_work(rds_wq, &pool->flush_worker, 10);
+			queue_delayed_work(rds_ib_fmr_wq,
+					   &pool->flush_worker, 10);
 		}
 	}
 
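
As a side note on the teardown ordering that the rds_ib_fmr_exit() comment depends on, here is a hedged sketch (struct example_pool and example_destroy_pool are simplified stand-ins, not the RDS structures): each pool waits for its own delayed flush work to finish before it is freed, which is what leaves the dedicated workqueue idle by the time destroy_workqueue() runs.

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Simplified stand-in for a per-device MR pool; illustrative only. */
struct example_pool {
	struct delayed_work flush_worker;
	/* ... MR lists, counters, limits ... */
};

/* Freeing a pool first waits out any pending or running flush work, so
 * once every pool is gone the shared flush workqueue has nothing queued
 * and destroy_workqueue() can proceed safely. */
static void example_destroy_pool(struct example_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	kfree(pool);
}
```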