author     Chris Mason <chris.mason@oracle.com>    2010-06-11 14:26:02 -0400
committer  Andy Grover <andy.grover@oracle.com>    2010-09-08 21:15:30 -0400
commit     7a0ff5dbdd0b4cb7ea8764da9d78f4bb2eebaf31 (patch)
tree       a3e6e1b1af5d63b9f7cb969d831724eecaa98cd5 /net
parent     eabb732279f1a41ac9d066aeb56973ae505c4cbc (diff)
RDS: use delayed work for the FMR flushes
Using a delayed work queue helps us make sure a healthy number of FMRs
have queued up over the limit. It makes for a large improvement in RDMA
iops.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
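The mechanics of the change are the standard work_struct-to-delayed_work conversion. A minimal sketch of the pattern, with hypothetical names (my_pool, my_flush_worker, my_pool_kick) standing in for the RDS ones:

    #include <linux/workqueue.h>

    struct my_pool {
            struct delayed_work flush_worker;   /* was: struct work_struct */
    };

    static void my_flush_worker(struct work_struct *work)
    {
            /* The callback still receives a work_struct pointer; it points
             * at the work member embedded inside the delayed_work, so
             * container_of() must go through flush_worker.work. */
            struct my_pool *pool =
                    container_of(work, struct my_pool, flush_worker.work);

            (void)pool;     /* flush logic elided in this sketch */
    }

    static void my_pool_init(struct my_pool *pool)
    {
            INIT_DELAYED_WORK(&pool->flush_worker, my_flush_worker);
    }

    static void my_pool_kick(struct my_pool *pool,
                             struct workqueue_struct *wq)
    {
            /* The third argument is a delay in jiffies before the
             * work becomes runnable. */
            queue_delayed_work(wq, &pool->flush_worker, 10);
    }

The only behavioral difference is at queueing time: queue_delayed_work() arms a timer and runs the work after the given delay instead of immediately, which is what gives frees a window to batch up.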
Diffstat (limited to 'net')
-rw-r--r--  net/rds/ib_rdma.c | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 8c40391de5a2..3a275af9d52f 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -66,7 +66,7 @@ struct rds_ib_mr {
  */
 struct rds_ib_mr_pool {
 	struct mutex flush_lock;		/* serialize fmr invalidate */
-	struct work_struct flush_worker;	/* flush worker */
+	struct delayed_work flush_worker;	/* flush worker */
 
 	atomic_t item_count;			/* total # of MRs */
 	atomic_t dirty_count;			/* # dirty of MRs */
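For reference, a struct delayed_work is just a work_struct paired with a timer (simplified from <linux/workqueue.h> of this era):

    struct delayed_work {
            struct work_struct work;
            struct timer_list timer;
    };

That embedding is why every call site below has to move to the *_delayed_* variants of the workqueue API.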
@@ -226,7 +226,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 	INIT_XLIST_HEAD(&pool->clean_list);
 	mutex_init(&pool->flush_lock);
 	init_waitqueue_head(&pool->flush_wait);
-	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
 	pool->fmr_attr.max_pages = fmr_message_size;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -254,7 +254,7 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
 {
-	cancel_work_sync(&pool->flush_worker);
+	cancel_delayed_work_sync(&pool->flush_worker);
 	rds_ib_flush_mr_pool(pool, 1, NULL);
 	WARN_ON(atomic_read(&pool->item_count));
 	WARN_ON(atomic_read(&pool->free_pinned));
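cancel_delayed_work_sync() deactivates a still-pending timer and also waits for an already-running instance of the worker to finish, so the synchronous flush that follows runs with no flush worker in flight. The old cancel_work_sync() knows nothing about the timer embedded in a delayed_work, so it is no longer the right call here. The teardown pattern, reusing the hypothetical names from the sketch above:

    static void my_pool_destroy(struct my_pool *pool)
    {
            /* Kill the pending timer and wait out any in-flight
             * worker before the final synchronous cleanup. */
            cancel_delayed_work_sync(&pool->flush_worker);
            /* ... final flush and free ... */
    }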
@@ -695,7 +695,7 @@ out_nolock:
 
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
-	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
+	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
 
 	rds_ib_flush_mr_pool(pool, 0, NULL);
 }
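Equivalently, the first hop can use the kernel's to_delayed_work() helper; a hypothetical respelling of the same line:

    struct delayed_work *dwork = to_delayed_work(work);
    struct rds_ib_mr_pool *pool =
            container_of(dwork, struct rds_ib_mr_pool, flush_worker);

Both forms recover the rds_ib_mr_pool from the work_struct pointer the workqueue core passes in; the .work suffix in the patch is needed because that pointer targets the embedded work member, not the delayed_work itself.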
@@ -720,7 +720,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_work(rds_wq, &pool->flush_worker);
+		queue_delayed_work(rds_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
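The delay of 10 is in jiffies (10 ms at HZ=1000, 100 ms at HZ=100), not milliseconds; msecs_to_jiffies() would make the unit explicit if that were intended:

    /* hypothetical explicit-unit spelling of the same request */
    queue_delayed_work(rds_wq, &pool->flush_worker, msecs_to_jiffies(10));

Because queue_delayed_work() is a no-op while the work is already pending, every free that lands inside the window coalesces into a single flush, which is how the pool accumulates "a healthy number of FMRs" before each flush runs.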
@@ -728,7 +728,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_work(rds_wq, &pool->flush_worker);
+			queue_delayed_work(rds_wq, &pool->flush_worker, 10);
 		}
 	}
 