author		Tejun Heo <tj@kernel.org>	2011-02-01 05:42:43 -0500
committer	Tejun Heo <tj@kernel.org>	2011-02-01 05:42:43 -0500
commit		c534a107e8fe446202b0fab102abc015c56c0317 (patch)
tree		5cd90d564c5e2e653b8b2905f1f2b83a1b664d28 /net/rds/ib_rdma.c
parent		aa70c585b15f64da6948bdacc7a7692addd65364 (diff)
rds/ib: use system_wq instead of rds_ib_fmr_wq
With cmwq, there's no reason to use the dedicated rds_ib_fmr_wq - it's not in
the memory reclaim path and the maximum number of concurrent work items is
bound by the number of devices. Drop it and use system_wq instead. This makes
rds_ib_fmr_init/exit() noops. Both removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andy Grover <andy.grover@oracle.com>
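For reference, schedule_delayed_work() is the convenience wrapper that queues
a delayed_work item on the shared system_wq, so each converted call site below
keeps its timeout behavior and only loses the private queue. A minimal sketch
of that relationship, under a hypothetical name to avoid clashing with the
real definition in include/linux/workqueue.h (whose exact return type has
varied across kernel versions):

	#include <linux/workqueue.h>

	/*
	 * Sketch of the equivalence this patch relies on: queueing on the
	 * shared system_wq needs no setup or teardown, unlike a queue made
	 * with create_workqueue(). Illustrative only, not copied from a
	 * specific kernel tree.
	 */
	static inline bool sketch_schedule_delayed_work(struct delayed_work *dwork,
							unsigned long delay)
	{
		return queue_delayed_work(system_wq, dwork, delay);
	}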
Diffstat (limited to 'net/rds/ib_rdma.c')
-rw-r--r--	net/rds/ib_rdma.c	27
1 file changed, 3 insertions(+), 24 deletions(-)
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 18a833c450c8..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
 #include "ib.h"
 #include "xlist.h"
 
-static struct workqueue_struct *rds_ib_fmr_wq;
-
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
 
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 	int err = 0, iter = 0;
 
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@ out_nolock:
 	return ret;
 }
 
-int rds_ib_fmr_init(void)
-{
-	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
-	if (!rds_ib_fmr_wq)
-		return -ENOMEM;
-	return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
-	destroy_workqueue(rds_ib_fmr_wq);
-}
-
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_delayed_work(rds_ib_fmr_wq,
-					   &pool->flush_worker, 10);
+			schedule_delayed_work(&pool->flush_worker, 10);
 		}
 	}
 
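The flush path itself is untouched apart from which queue the work lands on:
the pool's flush_worker stays an ordinary delayed_work, and
rds_ib_mr_pool_flush_worker() still recovers the pool with container_of(). A
self-contained sketch of that same pattern, using hypothetical demo_* names
rather than code from this file:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct demo_pool {
		struct delayed_work flush_worker;
	};

	static void demo_flush_worker(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct; recover the enclosing
		 * pool the same way the RDS code does. */
		struct demo_pool *pool =
			container_of(work, struct demo_pool, flush_worker.work);

		/* ... flush the pool ... */
		(void)pool;
	}

	static struct demo_pool *demo_pool_create(void)
	{
		struct demo_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

		if (!pool)
			return NULL;
		INIT_DELAYED_WORK(&pool->flush_worker, demo_flush_worker);
		/* Kick a flush in 10 jiffies on system_wq; no private
		 * workqueue to create or destroy. */
		schedule_delayed_work(&pool->flush_worker, 10);
		return pool;
	}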