author     Tejun Heo <tj@kernel.org>  2011-02-01 05:42:43 -0500
committer  Tejun Heo <tj@kernel.org>  2011-02-01 05:42:43 -0500
commit     c534a107e8fe446202b0fab102abc015c56c0317
tree       5cd90d564c5e2e653b8b2905f1f2b83a1b664d28 /net
parent     aa70c585b15f64da6948bdacc7a7692addd65364
rds/ib: use system_wq instead of rds_ib_fmr_wq
With cmwq, there's no reason to use dedicated rds_ib_fmr_wq - it's not
in the memory reclaim path and the maximum number of concurrent work
items is bound by the number of devices. Drop it and use system_wq
instead. This makes rds_ib_fmr_init/exit() noops; both are removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andy Grover <andy.grover@oracle.com>
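
For context, this is the standard cmwq conversion pattern: a driver that only
queues occasional work outside the memory-reclaim path does not need its own
workqueue and can put work items directly on the shared system_wq. A minimal
sketch of the before/after shape (the example_* names are illustrative, not
from this patch):

	#include <linux/workqueue.h>

	static void example_flush(struct work_struct *work)
	{
		/* Runs on a shared kworker from system_wq. */
	}

	static DECLARE_DELAYED_WORK(example_flush_work, example_flush);

	static void example_kick_flush(void)
	{
		/*
		 * Before cmwq: create_workqueue("example_flushd") at module
		 * init, queue_delayed_work(example_wq, ...) here, and
		 * destroy_workqueue(example_wq) at module exit.
		 *
		 * With cmwq: schedule_delayed_work() queues onto system_wq,
		 * so the init/exit bookkeeping disappears entirely.
		 */
		schedule_delayed_work(&example_flush_work, 10);
	}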
Diffstat (limited to 'net')
 net/rds/ib.c      |  9 +--------
 net/rds/ib.h      |  2 --
 net/rds/ib_rdma.c | 27 +++------------------------
 3 files changed, 4 insertions(+), 34 deletions(-)
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4123967d4d65..cce19f95c624 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -364,7 +364,6 @@ void rds_ib_exit(void)
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
-	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -400,13 +399,9 @@ int rds_ib_init(void)
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = rds_ib_fmr_init();
-	if (ret)
-		goto out;
-
 	ret = ib_register_client(&rds_ib_client);
 	if (ret)
-		goto out_fmr_exit;
+		goto out;
 
 	ret = rds_ib_sysctl_init();
 	if (ret)
@@ -430,8 +425,6 @@ out_sysctl:
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
-out_fmr_exit:
-	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e34ad032b66d..4297d92788dc 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -307,8 +307,6 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 18a833c450c8..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
 #include "ib.h"
 #include "xlist.h"
 
-static struct workqueue_struct *rds_ib_fmr_wq;
-
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
 
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 	int err = 0, iter = 0;
 
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@ out_nolock:
 	return ret;
 }
 
-int rds_ib_fmr_init(void)
-{
-	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
-	if (!rds_ib_fmr_wq)
-		return -ENOMEM;
-	return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
-	destroy_workqueue(rds_ib_fmr_wq);
-}
-
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_delayed_work(rds_ib_fmr_wq,
-					   &pool->flush_worker, 10);
+			schedule_delayed_work(&pool->flush_worker, 10);
 		}
 	}
 
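
The comment deleted from ib_rdma.c above records why this is safe at module
exit: each pool waits on its own flush work as it is freed, so the dedicated
queue was already guaranteed idle by the time rds_ib_fmr_exit() ran, and the
same per-pool guarantee carries over unchanged to system_wq. A pool teardown
path providing that guarantee would typically look like (illustrative sketch,
not part of this patch):

	/* During pool teardown: cancel pending flush work and wait for
	 * a running instance to finish before freeing the pool. */
	cancel_delayed_work_sync(&pool->flush_worker);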